# LCORE-1262: Use context data class in responses #1612
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Open · asimurka wants to merge 2 commits into lightspeed-core:main from asimurka:refactor_responses_endpoint
Changes from all commits · 2 commits
Large diffs are not rendered by default; several file diffs in this PR did not render and are omitted below.
**New file** (`+175` lines): request parameter model for Llama Stack responses API calls.

```python
"""Request parameter model for Llama Stack responses API calls."""

from collections.abc import Mapping
from typing import Any, Final, Optional

from llama_stack_api.openai_responses import (
    OpenAIResponseInputTool as InputTool,
)
from llama_stack_api.openai_responses import (
    OpenAIResponseInputToolChoice as ToolChoice,
)
from llama_stack_api.openai_responses import (
    OpenAIResponsePrompt as Prompt,
)
from llama_stack_api.openai_responses import (
    OpenAIResponseReasoning as Reasoning,
)
from llama_stack_api.openai_responses import (
    OpenAIResponseText as Text,
)
from llama_stack_api.openai_responses import (
    OpenAIResponseToolMCP as OutputToolMCP,
)
from pydantic import BaseModel, Field

from utils.tool_formatter import translate_vector_store_ids_to_user_facing
from utils.types import IncludeParameter, ResponseInput

# Attribute names that are echoed back in the response.
_ECHOED_FIELDS: Final[set[str]] = set(
    {
        "instructions",
        "max_tool_calls",
        "max_output_tokens",
        "metadata",
        "model",
        "parallel_tool_calls",
        "previous_response_id",
        "prompt",
        "reasoning",
        "safety_identifier",
        "temperature",
        "top_p",
        "truncation",
        "text",
        "tool_choice",
        "store",
    }
)


class ResponsesApiParams(BaseModel):
    """Parameters for a Llama Stack Responses API request.

    All fields accepted by the Llama Stack client responses.create() body are
    included so that the dumped model can be passed directly to responses.create().
    """

    input: ResponseInput = Field(description="The input text or structured input items")
    model: str = Field(description='The full model ID in format "provider/model"')
    conversation: str = Field(description="The conversation ID in llama-stack format")
    include: Optional[list[IncludeParameter]] = Field(
        default=None,
        description="Output item types to include in the response",
    )
    instructions: Optional[str] = Field(
        default=None, description="The resolved system prompt"
    )
    max_infer_iters: Optional[int] = Field(
        default=None,
        description="Maximum number of inference iterations",
    )
    max_output_tokens: Optional[int] = Field(
        default=None,
        description="Maximum number of tokens allowed in the response",
    )
    max_tool_calls: Optional[int] = Field(
        default=None,
        description="Maximum tool calls allowed in a single response",
    )
    metadata: Optional[dict[str, str]] = Field(
        default=None,
        description="Custom metadata for tracking or logging",
    )
    parallel_tool_calls: Optional[bool] = Field(
        default=None,
        description="Whether the model can make multiple tool calls in parallel",
    )
    previous_response_id: Optional[str] = Field(
        default=None,
        description="Identifier of the previous response in a multi-turn conversation",
    )
    prompt: Optional[Prompt] = Field(
        default=None,
        description="Prompt template with variables for dynamic substitution",
    )
    reasoning: Optional[Reasoning] = Field(
        default=None,
        description="Reasoning configuration for the response",
    )
    safety_identifier: Optional[str] = Field(
        default=None,
        description="Stable identifier for safety monitoring and abuse detection",
    )
    store: bool = Field(description="Whether to store the response")
    stream: bool = Field(description="Whether to stream the response")
    temperature: Optional[float] = Field(
        default=None,
        description="Sampling temperature (e.g. 0.0-2.0)",
    )
    text: Optional[Text] = Field(
        default=None,
        description="Text response configuration (format constraints)",
    )
    tool_choice: Optional[ToolChoice] = Field(
        default=None,
        description="Tool selection strategy",
    )
    tools: Optional[list[InputTool]] = Field(
        default=None,
        description="Prepared tool groups for Responses API (same type as ResponsesRequest.tools)",
    )
    extra_headers: Optional[dict[str, str]] = Field(
        default=None,
        description="Extra HTTP headers to send with the request (e.g. x-llamastack-provider-data)",
    )

    def model_dump(self, *args: Any, **kwargs: Any) -> dict[str, Any]:
        """Serialize params, re-injecting MCP authorization stripped by exclude=True.

        llama-stack-api marks ``InputToolMCP.authorization`` with
        ``Field(exclude=True)`` to prevent token leakage in API responses.
        The base ``model_dump()`` therefore strips the field, but we need it
        in the request payload so the llama-stack server can authenticate with
        MCP servers. See LCORE-1414 / GitHub issue #1269.
        """
        result = super().model_dump(*args, **kwargs)
        # Only one context option is allowed; previous_response_id has priority.
        # The turn is added to the conversation manually if previous_response_id is used.
        if self.previous_response_id:
            result.pop("conversation", None)
        dumped_tools = result.get("tools")
        if not self.tools or not isinstance(dumped_tools, list):
            return result
        if len(dumped_tools) != len(self.tools):
            return result
        for tool, dumped_tool in zip(self.tools, dumped_tools):
            authorization = getattr(tool, "authorization", None)
            if authorization is not None and isinstance(dumped_tool, dict):
                dumped_tool["authorization"] = authorization
        return result

    def echoed_params(self, rag_id_mapping: Mapping[str, str]) -> dict[str, Any]:
        """Build kwargs echoed into synthetic OpenAI-style responses (e.g. moderation blocks).

        Parameters:
            rag_id_mapping: Llama Stack vector_db_id to user-facing RAG id (from app config).
        Returns:
            dict[str, Any]: Field names and values to merge into the response object.
        """
        data = self.model_dump(include=_ECHOED_FIELDS)
        if self.tools is not None:
            tool_dicts: list[dict[str, Any]] = []
            for t in self.tools:
                if t.type == "mcp":
                    validated = OutputToolMCP.model_validate(t.model_dump())
                    tool_dicts.append(validated.model_dump())
                else:
                    tool_dicts.append(t.model_dump())

            data["tools"] = translate_vector_store_ids_to_user_facing(
                tool_dicts, rag_id_mapping
            )

        return data
```
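As a hedged usage sketch of this model (not code from this PR: the function, the `base_url`, and all field values are illustrative assumptions; only `ResponsesApiParams`, `AsyncLlamaStackClient`, and the pass-through to `responses.create()` described in the class docstring come from the diff):

```python
# Hypothetical call site; a sketch under assumed wiring, not code from this PR.
from llama_stack_client import AsyncLlamaStackClient


async def send_request() -> None:
    client = AsyncLlamaStackClient(base_url="http://localhost:8321")  # assumed URL
    params = ResponsesApiParams(
        input="Hello",                   # placeholder input text
        model="openai/gpt-4o-mini",      # placeholder "provider/model" id
        conversation="conv-123",         # placeholder conversation id
        store=True,
        stream=False,
    )
    # model_dump() re-injects any MCP tool authorization stripped by
    # Field(exclude=True) and drops "conversation" when previous_response_id
    # is set, so the dump can be splatted straight into the request body.
    response = await client.responses.create(**params.model_dump())
```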
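The `model_dump()` override above exists because of pydantic's `exclude=True` semantics. A self-contained illustration of that mechanic, using generic stand-in names rather than the real llama-stack-api types:

```python
from typing import Optional

from pydantic import BaseModel, Field


class ExampleTool(BaseModel):
    """Generic stand-in for an MCP tool definition (not the real type)."""

    name: str
    # exclude=True keeps the secret out of every model_dump()/JSON dump.
    authorization: Optional[str] = Field(default=None, exclude=True)


tool = ExampleTool(name="mcp-server", authorization="secret-token")
assert "authorization" not in tool.model_dump()  # stripped by exclude=True
# This is why ResponsesApiParams.model_dump() has to copy the value back in.
```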
**New file** (`+45` lines): request-scoped context model for the responses endpoint pipeline.

```python
"""Request-scoped context model for the responses endpoint pipeline."""

from datetime import datetime
from typing import Optional

from fastapi import BackgroundTasks
from llama_stack_client import AsyncLlamaStackClient
from pydantic import BaseModel, ConfigDict, Field

from utils.types import RAGContext, ShieldModerationResult


class ResponsesContext(BaseModel):
    """Shared request-scoped context for the /responses endpoint pipeline."""

    model_config = ConfigDict(arbitrary_types_allowed=True)

    client: AsyncLlamaStackClient = Field(description="The Llama Stack client")
    auth: tuple[str, str, bool, str] = Field(
        description="Authentication tuple (user_id, username, skip_userid_check, token)",
    )
    input_text: str = Field(description="Extracted user input text for the turn")
    started_at: datetime = Field(description="UTC timestamp when the request started")
    moderation_result: ShieldModerationResult = Field(
        description="Shield moderation outcome",
    )
    inline_rag_context: RAGContext = Field(
        description="Inline RAG context for the turn"
    )
    filter_server_tools: bool = Field(
        default=False,
        description="Whether to filter server-deployed MCP tool events from output",
    )
    background_tasks: Optional[BackgroundTasks] = Field(
        default=None,
        description="Background tasks for telemetry, if enabled",
    )
    rh_identity_context: tuple[str, str] = Field(
        default_factory=lambda: ("", ""),
        description="RH identity (org_id, system_id) for Splunk events",
    )
    generate_topic_summary: bool = Field(
        default=False,
        description="Whether to generate a topic summary for new conversations",
    )
```
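Note that `ConfigDict(arbitrary_types_allowed=True)` is what permits non-pydantic field types such as `AsyncLlamaStackClient` and `BackgroundTasks`. As a hedged sketch of how the endpoint might assemble this context (the factory function and every literal value below are illustrative assumptions, not code from this PR):

```python
# Hypothetical factory; all literal values are placeholders.
from datetime import datetime, timezone

from fastapi import BackgroundTasks
from llama_stack_client import AsyncLlamaStackClient

from utils.types import RAGContext, ShieldModerationResult


def build_context(
    client: AsyncLlamaStackClient,
    moderation: ShieldModerationResult,
    rag: RAGContext,
    tasks: BackgroundTasks,
) -> ResponsesContext:
    return ResponsesContext(
        client=client,
        auth=("user-id", "username", False, "token"),  # placeholder auth tuple
        input_text="What is Lightspeed?",              # placeholder user input
        started_at=datetime.now(timezone.utc),
        moderation_result=moderation,
        inline_rag_context=rag,
        background_tasks=tasks,
        generate_topic_summary=True,
    )
```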
**Modified file**: the existing request model drops the `OutputToolMCP` import, the `translate_vector_store_ids_to_user_facing` import, and its `echoed_params` method, all of which now live in the new parameter model.

```diff
@@ -21,12 +21,8 @@
 from llama_stack_api.openai_responses import (
     OpenAIResponseText as Text,
 )
-from llama_stack_api.openai_responses import (
-    OpenAIResponseToolMCP as OutputToolMCP,
-)
 from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator

 from configuration import configuration
 from constants import (
     MCP_AUTH_CLIENT,
     MCP_AUTH_KUBERNETES,
@@ -38,7 +34,6 @@
 )
 from log import get_logger
 from utils import suid
-from utils.tool_formatter import translate_vector_store_ids_to_user_facing
 from utils.types import IncludeParameter, ResponseInput

 logger = get_logger(__name__)
@@ -867,28 +862,6 @@ def check_previous_response_id(cls, value: Optional[str]) -> Optional[str]:
             raise ValueError("You cannot provide context by moderation response.")
         return value

-    def echoed_params(self) -> dict[str, Any]:
-        """Build kwargs echoed into synthetic OpenAI-style responses (e.g. moderation blocks).
-
-        Returns:
-            dict[str, Any]: Field names and values to merge into the response object.
-        """
-        data = self.model_dump(include=_ECHOED_FIELDS)
-        if self.tools is not None:
-            tool_dicts: list[dict[str, Any]] = [
-                (
-                    OutputToolMCP.model_validate(t.model_dump()).model_dump()
-                    if t.type == "mcp"
-                    else t.model_dump()
-                )
-                for t in self.tools
-            ]
-            data["tools"] = translate_vector_store_ids_to_user_facing(
-                tool_dicts, configuration.rag_id_mapping
-            )
-
-        return data

 class MCPServerRegistrationRequest(BaseModel):
     """Request model for dynamically registering an MCP server.
```

> **asimurka** (Author), on the removed `echoed_params`: Replaced under […]
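Taken together, the refactor moves the config lookup out of the request model: `echoed_params` now receives the RAG id mapping as an argument instead of reading `configuration.rag_id_mapping` itself. A hedged before/after sketch of a call site (the variable names are assumptions; neither line is quoted from this PR):

```python
# Before: the request model reached into global configuration itself.
echoed = request.echoed_params()

# After: the caller injects the mapping, keeping the params model config-free.
echoed = params.echoed_params(configuration.rag_id_mapping)
```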