Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions .github/workflows/ci.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -23,8 +23,12 @@ jobs:
strategy:
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
python-version: ["3.10", "3.11", "3.12", "3.13"]
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- uses: astral-sh/setup-uv@v3
- run: uv run pytest
5 changes: 5 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,11 @@ All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/)

## [0.5.1]
### Fixed
- Fixed bug where an incorrect thread ID was referenced, causing chats to fail
- Added support for Python >= 3.10

## [0.5.0]
### Added
- Correct citation text in terminal and Gradio output
Expand Down
7 changes: 5 additions & 2 deletions pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,12 +1,12 @@
[project]
name = "vecsync"
version = "0.5.0"
version = "0.5.1"
description = "A simple command-line utility for synchronizing documents to vector storage for LLM interaction."
readme = "README.md"
authors = [
{ name = "John Bencina", email = "jbencina@users.noreply.github.com" }
]
requires-python = ">=3.13"
requires-python = ">=3.10"
license = { file = "LICENSE" }

keywords = [
Expand Down Expand Up @@ -35,6 +35,9 @@ classifiers = [
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Topic :: Utilities",
Expand Down
4 changes: 2 additions & 2 deletions src/vecsync/chat/clients/openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -154,10 +154,10 @@ def _run_stream(self, handler: OpenAIHandler):
) as stream:
stream.until_done()

def send_message(self, thread_id: str, prompt: str):
def send_message(self, prompt: str):
self.initialize_chat()

return self.client.beta.threads.messages.create(thread_id=thread_id, role="user", content=prompt)
return self.client.beta.threads.messages.create(thread_id=self.thread_id, role="user", content=prompt)

def stream_response(self, thread_id: str, assistant_id: str, handler):
with self.client.beta.threads.runs.stream(
Expand Down
4 changes: 2 additions & 2 deletions src/vecsync/chat/interface.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ def prompt(self, prompt_text: str):
fmt = ConsoleFormatter()
handler = OpenAIHandler(self.client.files, fmt)

self.client.send_message(self.client.thread_id, prompt_text)
self.client.send_message(prompt_text)

self.executor.submit(self.client.stream_response, self.client.thread_id, self.client.assistant_id, handler)
for chunk in handler.consume_queue():
Expand All @@ -34,7 +34,7 @@ def gradio_prompt(message, history):
fmt = GradioFormatter()
handler = OpenAIHandler(self.client.files, fmt)

self.client.send_message(self.client.thread_id, message)
self.client.send_message(message)

self.executor.submit(self.client.stream_response, self.client.thread_id, self.client.assistant_id, handler)
response = ""
Expand Down