diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index af4dec5..f88f3d9 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -23,8 +23,12 @@ jobs: strategy: matrix: os: [ubuntu-latest, macos-latest, windows-latest] + python-version: ["3.10", "3.11", "3.12", "3.13"] runs-on: ${{ matrix.os }} steps: - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} - uses: astral-sh/setup-uv@v3 - run: uv run pytest diff --git a/CHANGELOG.md b/CHANGELOG.md index 47a7704..c0de48e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,11 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/) +## [0.5.1] +### Fixed +- Fixed bug where an incorrect thread ID was referenced causing chats to fail +- Added support for Python >= 3.10 + ## [0.5.0] ### Added - Correct citation text in terminal and Gradio output diff --git a/pyproject.toml b/pyproject.toml index 041d05c..30a49d1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,12 +1,12 @@ [project] name = "vecsync" -version = "0.5.0" +version = "0.5.1" description = "A simple command-line utility for synchronizing documents to vector storage for LLM interaction." 
readme = "README.md" authors = [ { name = "John Bencina", email = "jbencina@users.noreply.github.com" } ] -requires-python = ">=3.13" +requires-python = ">=3.10" license = { file = "LICENSE" } keywords = [ @@ -35,6 +35,9 @@ classifiers = [ "Operating System :: MacOS :: MacOS X", "Operating System :: Microsoft :: Windows", "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "License :: OSI Approved :: GNU General Public License v3 (GPLv3)", "Topic :: Utilities", diff --git a/src/vecsync/chat/clients/openai.py b/src/vecsync/chat/clients/openai.py index 98215a5..3173aa1 100644 --- a/src/vecsync/chat/clients/openai.py +++ b/src/vecsync/chat/clients/openai.py @@ -154,10 +154,10 @@ def _run_stream(self, handler: OpenAIHandler): ) as stream: stream.until_done() - def send_message(self, thread_id: str, prompt: str): + def send_message(self, prompt: str): self.initialize_chat() - return self.client.beta.threads.messages.create(thread_id=thread_id, role="user", content=prompt) + return self.client.beta.threads.messages.create(thread_id=self.thread_id, role="user", content=prompt) def stream_response(self, thread_id: str, assistant_id: str, handler): with self.client.beta.threads.runs.stream( diff --git a/src/vecsync/chat/interface.py b/src/vecsync/chat/interface.py index 7bed4db..5cd44ad 100644 --- a/src/vecsync/chat/interface.py +++ b/src/vecsync/chat/interface.py @@ -16,7 +16,7 @@ def prompt(self, prompt_text: str): fmt = ConsoleFormatter() handler = OpenAIHandler(self.client.files, fmt) - self.client.send_message(self.client.thread_id, prompt_text) + self.client.send_message(prompt_text) self.executor.submit(self.client.stream_response, self.client.thread_id, self.client.assistant_id, handler) for chunk in handler.consume_queue(): @@ -34,7 +34,7 @@ def gradio_prompt(message, history): fmt = GradioFormatter() 
handler = OpenAIHandler(self.client.files, fmt) - self.client.send_message(self.client.thread_id, message) + self.client.send_message(message) self.executor.submit(self.client.stream_response, self.client.thread_id, self.client.assistant_id, handler) response = ""