From 7277488bfd55f0b7a4be52b22482d62c05bff3f3 Mon Sep 17 00:00:00 2001
From: Chandu Neerati <129150574+NeeratiChandu@users.noreply.github.com>
Date: Fri, 27 Mar 2026 15:13:16 +0530
Subject: [PATCH] Update llm_client.py
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Implements: Switchable OpenAI ↔ Claude + Factory pattern
---
 app/models/llm_client.py | 109 +++++++++++++++++++++++++++++++-------
 1 file changed, 88 insertions(+), 21 deletions(-)

diff --git a/app/models/llm_client.py b/app/models/llm_client.py
index be255bc..805a786 100644
--- a/app/models/llm_client.py
+++ b/app/models/llm_client.py
@@ -1,26 +1,93 @@
-import os
-class LLMClient:
-    def __init__(self, api_key: str = None, model_name: str = None):
-        openai_key = api_key or os.getenv("OPENAI_API_KEY")
-        groq_key = os.getenv("GROQ_API_KEY")
+"""
+Concrete LLM implementations (OpenAI + Anthropic)
+Week 2: Switchable LLM client
+"""
 
-        if openai_key:
-            from openai import OpenAI
-            self.model_name = model_name or "gpt-4o-mini"
-            self._client = OpenAI(api_key=openai_key)
-            self._provider = "openai"
-        elif groq_key:
-            from groq import Groq
-            self.model_name = model_name or "llama-3.3-70b-versatile"
-            self._client = Groq(api_key=groq_key)
-            self._provider = "groq"
-        else:
-            raise ValueError("No API key found. Set OPENAI_API_KEY or GROQ_API_KEY in .env")
+import openai
+import anthropic
+from typing import List, Dict, Any, AsyncGenerator
+import asyncio
+from app.models.base import LLMClient, BaseLLMResponse
+from app.config.settings import settings
 
-    def chat(self, messages: list[dict], **kwargs) -> str:
-        response = self._client.chat.completions.create(
-            model=self.model_name,
+class OpenAILLM(LLMClient):
+    def __init__(self):
+        self.client = openai.AsyncOpenAI(api_key=settings.openai_api_key)
+
+    async def chat(
+        self,
+        messages: List[Dict[str, str]],
+        model: str = "gpt-4o-mini",
+        **kwargs
+    ) -> BaseLLMResponse:
+        response = await self.client.chat.completions.create(
+            model=model,
+            messages=messages,
+            **kwargs
+        )
+        choice = response.choices[0]
+        return BaseLLMResponse(
+            content=choice.message.content or "",
+            usage=response.usage.model_dump() if response.usage else None
+        )
+
+    async def stream_chat(
+        self,
+        messages: List[Dict[str, str]],
+        model: str = "gpt-4o-mini",
+        **kwargs
+    ) -> AsyncGenerator[BaseLLMResponse, None]:
+        stream = await self.client.chat.completions.create(
+            model=model,
             messages=messages,
+            stream=True,
             **kwargs
         )
-        return response.choices[0].message.content
\ No newline at end of file
+        async for chunk in stream:
+            if chunk.choices[0].delta.content:
+                yield BaseLLMResponse(content=chunk.choices[0].delta.content)
+
+class AnthropicLLM(LLMClient):
+    def __init__(self):
+        # Must be the async client: chat() awaits messages.create(); the sync
+        # anthropic.Anthropic client would raise TypeError on 'await'.
+        self.client = anthropic.AsyncAnthropic(api_key=settings.anthropic_api_key)
+
+    async def chat(
+        self,
+        messages: List[Dict[str, str]],
+        model: str = "claude-3-5-sonnet-20240620",
+        **kwargs
+    ) -> BaseLLMResponse:
+        # Convert OpenAI format to Anthropic
+        system = next((m["content"] for m in messages if m["role"] == "system"), "")
+        user_messages = [m for m in messages if m["role"] != "system"]
+
+        response = await self.client.messages.create(
+            model=model,
+            max_tokens=4096,
+            messages=user_messages,
+            system=system,
+            **kwargs
+        )
+        return BaseLLMResponse(content=response.content[0].text)
+
+    async def stream_chat(
+        self,
+        messages: List[Dict[str, str]],
+        model: str = "claude-3-5-sonnet-20240620",
+        **kwargs
+    ) -> AsyncGenerator[BaseLLMResponse, None]:
+        # Streaming implementation for Anthropic
+        raise NotImplementedError("Anthropic streaming - Week 3 stretch")
+
+class LLMFactory:
+    @staticmethod
+    def create() -> LLMClient:
+        if settings.llm_provider == "openai" or (
+            settings.llm_provider == "auto" and settings.openai_api_key
+        ):
+            return OpenAILLM()
+        elif settings.anthropic_api_key:
+            return AnthropicLLM()
+        else:
+            raise ValueError("No valid LLM provider configured")