Skip to content

Commit 6c9588d

Browse files
chore: use Pydantic to generate OpenAPI schema
1 parent e71e7a9 commit 6c9588d

File tree

204 files changed

+7448
-7999
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

204 files changed

+7448
-7999
lines changed

.stats.yml

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
configured_endpoints: 96
2-
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-602ce64aa309cc15fa09388d99c9f298795686fc37605237cbc03c39d29aabf6.yml
3-
openapi_spec_hash: fc6995247b2555e8660bc9291eb10415
4-
config_hash: e8a35d9d37cb4774b4b0fe1b167dc156
1+
configured_endpoints: 103
2+
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-2b99a80543f8bc8fa164167693c214651ac8e710f4726fb5869183b4d6c71a03.yml
3+
openapi_spec_hash: a5632057f5e4d956a71c20a79c0d879c
4+
config_hash: 0017f6c419cbbf7b949f9b2842917a79

README.md

Lines changed: 6 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -33,10 +33,7 @@ from llama_stack_client import LlamaStackClient
3333

3434
client = LlamaStackClient()
3535

36-
response = client.models.register(
37-
model_id="model_id",
38-
)
39-
print(response.identifier)
36+
models = client.models.list()
4037
```
4138

4239
While you can provide an `api_key` keyword argument, we recommend using [python-dotenv](https://pypi.org/project/python-dotenv/) to add `LLAMA_STACK_CLIENT_API_KEY="My API Key"` to your `.env` file so that your API Key is not stored in source control.
@@ -93,10 +90,7 @@ client = AsyncLlamaStackClient(
9390

9491

9592
async def main() -> None:
96-
response = await client.models.register(
97-
model_id="model_id",
98-
)
99-
print(response.identifier)
93+
models = await client.models.list()
10094

10195

10296
asyncio.run(main())
@@ -127,10 +121,7 @@ async def main() -> None:
127121
async with AsyncLlamaStackClient(
128122
http_client=DefaultAioHttpClient(),
129123
) as client:
130-
response = await client.models.register(
131-
model_id="model_id",
132-
)
133-
print(response.identifier)
124+
models = await client.models.list()
134125

135126

136127
asyncio.run(main())
@@ -156,7 +147,7 @@ stream = client.chat.completions.create(
156147
stream=True,
157148
)
158149
for completion in stream:
159-
print(completion)
150+
print(completion.id)
160151
```
161152

162153
The async client uses the exact same interface.
@@ -177,7 +168,7 @@ stream = await client.chat.completions.create(
177168
stream=True,
178169
)
179170
async for completion in stream:
180-
print(completion)
171+
print(completion.id)
181172
```
182173

183174
## Using types
@@ -378,7 +369,7 @@ response = client.chat.completions.with_raw_response.create(
378369
print(response.headers.get('X-My-Header'))
379370

380371
completion = response.parse() # get the object that `chat.completions.create()` would have returned
381-
print(completion)
372+
print(completion.id)
382373
```
383374

384375
These methods return an [`APIResponse`](https://github.com/meta-llama/llama-stack-python/tree/main/src/llama_stack_client/_response.py) object.

api.md

Lines changed: 27 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -50,8 +50,8 @@ from llama_stack_client.types import ToolDef, ToolInvocationResult, ToolRuntimeL
5050

5151
Methods:
5252

53-
- <code title="post /v1/tool-runtime/invoke">client.tool_runtime.<a href="./src/llama_stack_client/resources/tool_runtime/tool_runtime.py">invoke_tool</a>(\*\*<a href="src/llama_stack_client/types/tool_runtime_invoke_tool_params.py">params</a>) -> <a href="./src/llama_stack_client/types/tool_invocation_result.py">ToolInvocationResult</a></code>
54-
- <code title="get /v1/tool-runtime/list-tools">client.tool_runtime.<a href="./src/llama_stack_client/resources/tool_runtime/tool_runtime.py">list_tools</a>(\*\*<a href="src/llama_stack_client/types/tool_runtime_list_tools_params.py">params</a>) -> <a href="./src/llama_stack_client/types/tool_runtime_list_tools_response.py">ToolRuntimeListToolsResponse</a></code>
53+
- <code title="post /v1/tool-runtime/invoke">client.tool_runtime.<a href="./src/llama_stack_client/resources/tool_runtime.py">invoke_tool</a>(\*\*<a href="src/llama_stack_client/types/tool_runtime_invoke_tool_params.py">params</a>) -> <a href="./src/llama_stack_client/types/tool_invocation_result.py">ToolInvocationResult</a></code>
54+
- <code title="get /v1/tool-runtime/list-tools">client.tool_runtime.<a href="./src/llama_stack_client/resources/tool_runtime.py">list_tools</a>(\*\*<a href="src/llama_stack_client/types/tool_runtime_list_tools_params.py">params</a>) -> <a href="./src/llama_stack_client/types/tool_runtime_list_tools_response.py">ToolRuntimeListToolsResponse</a></code>
5555

5656
# Responses
5757

@@ -131,6 +131,7 @@ Types:
131131
from llama_stack_client.types.conversations import (
132132
ItemCreateResponse,
133133
ItemListResponse,
134+
ItemDeleteResponse,
134135
ItemGetResponse,
135136
)
136137
```
@@ -139,6 +140,7 @@ Methods:
139140

140141
- <code title="post /v1/conversations/{conversation_id}/items">client.conversations.items.<a href="./src/llama_stack_client/resources/conversations/items.py">create</a>(conversation_id, \*\*<a href="src/llama_stack_client/types/conversations/item_create_params.py">params</a>) -> <a href="./src/llama_stack_client/types/conversations/item_create_response.py">ItemCreateResponse</a></code>
141142
- <code title="get /v1/conversations/{conversation_id}/items">client.conversations.items.<a href="./src/llama_stack_client/resources/conversations/items.py">list</a>(conversation_id, \*\*<a href="src/llama_stack_client/types/conversations/item_list_params.py">params</a>) -> <a href="./src/llama_stack_client/types/conversations/item_list_response.py">SyncOpenAICursorPage[ItemListResponse]</a></code>
143+
- <code title="delete /v1/conversations/{conversation_id}/items/{item_id}">client.conversations.items.<a href="./src/llama_stack_client/resources/conversations/items.py">delete</a>(item_id, \*, conversation_id) -> <a href="./src/llama_stack_client/types/conversations/item_delete_response.py">ItemDeleteResponse</a></code>
142144
- <code title="get /v1/conversations/{conversation_id}/items/{item_id}">client.conversations.items.<a href="./src/llama_stack_client/resources/conversations/items.py">get</a>(item_id, \*, conversation_id) -> <a href="./src/llama_stack_client/types/conversations/item_get_response.py">ItemGetResponse</a></code>
143145

144146
# Inspect
@@ -190,7 +192,7 @@ Methods:
190192

191193
- <code title="post /v1/chat/completions">client.chat.completions.<a href="./src/llama_stack_client/resources/chat/completions.py">create</a>(\*\*<a href="src/llama_stack_client/types/chat/completion_create_params.py">params</a>) -> <a href="./src/llama_stack_client/types/chat/completion_create_response.py">CompletionCreateResponse</a></code>
192194
- <code title="get /v1/chat/completions/{completion_id}">client.chat.completions.<a href="./src/llama_stack_client/resources/chat/completions.py">retrieve</a>(completion_id) -> <a href="./src/llama_stack_client/types/chat/completion_retrieve_response.py">CompletionRetrieveResponse</a></code>
193-
- <code title="get /v1/chat/completions">client.chat.completions.<a href="./src/llama_stack_client/resources/chat/completions.py">list</a>(\*\*<a href="src/llama_stack_client/types/chat/completion_list_params.py">params</a>) -> <a href="./src/llama_stack_client/types/chat/completion_list_response.py">SyncOpenAICursorPage[CompletionListResponse]</a></code>
195+
- <code title="get /v1/chat/completions">client.chat.completions.<a href="./src/llama_stack_client/resources/chat/completions.py">list</a>(\*\*<a href="src/llama_stack_client/types/chat/completion_list_params.py">params</a>) -> <a href="./src/llama_stack_client/types/chat/completion_list_response.py">CompletionListResponse</a></code>
194196

195197
# Completions
196198

@@ -400,6 +402,7 @@ Methods:
400402
- <code title="get /v1/scoring-functions/{scoring_fn_id}">client.scoring_functions.<a href="./src/llama_stack_client/resources/scoring_functions.py">retrieve</a>(scoring_fn_id) -> <a href="./src/llama_stack_client/types/scoring_fn.py">ScoringFn</a></code>
401403
- <code title="get /v1/scoring-functions">client.scoring_functions.<a href="./src/llama_stack_client/resources/scoring_functions.py">list</a>() -> <a href="./src/llama_stack_client/types/scoring_function_list_response.py">ScoringFunctionListResponse</a></code>
402404
- <code title="post /v1/scoring-functions">client.scoring_functions.<a href="./src/llama_stack_client/resources/scoring_functions.py">register</a>(\*\*<a href="src/llama_stack_client/types/scoring_function_register_params.py">params</a>) -> None</code>
405+
- <code title="delete /v1/scoring-functions/{scoring_fn_id}">client.scoring_functions.<a href="./src/llama_stack_client/resources/scoring_functions.py">unregister</a>(scoring_fn_id) -> None</code>
403406

404407
# Files
405408

@@ -417,6 +420,26 @@ Methods:
417420
- <code title="delete /v1/files/{file_id}">client.files.<a href="./src/llama_stack_client/resources/files.py">delete</a>(file_id) -> <a href="./src/llama_stack_client/types/delete_file_response.py">DeleteFileResponse</a></code>
418421
- <code title="get /v1/files/{file_id}/content">client.files.<a href="./src/llama_stack_client/resources/files.py">content</a>(file_id) -> object</code>
419422

423+
# Batches
424+
425+
Types:
426+
427+
```python
428+
from llama_stack_client.types import (
429+
BatchCreateResponse,
430+
BatchRetrieveResponse,
431+
BatchListResponse,
432+
BatchCancelResponse,
433+
)
434+
```
435+
436+
Methods:
437+
438+
- <code title="post /v1/batches">client.batches.<a href="./src/llama_stack_client/resources/batches.py">create</a>(\*\*<a href="src/llama_stack_client/types/batch_create_params.py">params</a>) -> <a href="./src/llama_stack_client/types/batch_create_response.py">BatchCreateResponse</a></code>
439+
- <code title="get /v1/batches/{batch_id}">client.batches.<a href="./src/llama_stack_client/resources/batches.py">retrieve</a>(batch_id) -> <a href="./src/llama_stack_client/types/batch_retrieve_response.py">BatchRetrieveResponse</a></code>
440+
- <code title="get /v1/batches">client.batches.<a href="./src/llama_stack_client/resources/batches.py">list</a>(\*\*<a href="src/llama_stack_client/types/batch_list_params.py">params</a>) -> <a href="./src/llama_stack_client/types/batch_list_response.py">SyncOpenAICursorPage[BatchListResponse]</a></code>
441+
- <code title="post /v1/batches/{batch_id}/cancel">client.batches.<a href="./src/llama_stack_client/resources/batches.py">cancel</a>(batch_id) -> <a href="./src/llama_stack_client/types/batch_cancel_response.py">BatchCancelResponse</a></code>
442+
420443
# Alpha
421444

422445
## Inference
@@ -480,6 +503,7 @@ Methods:
480503
- <code title="get /v1alpha/eval/benchmarks/{benchmark_id}">client.alpha.benchmarks.<a href="./src/llama_stack_client/resources/alpha/benchmarks.py">retrieve</a>(benchmark_id) -> <a href="./src/llama_stack_client/types/alpha/benchmark.py">Benchmark</a></code>
481504
- <code title="get /v1alpha/eval/benchmarks">client.alpha.benchmarks.<a href="./src/llama_stack_client/resources/alpha/benchmarks.py">list</a>() -> <a href="./src/llama_stack_client/types/alpha/benchmark_list_response.py">BenchmarkListResponse</a></code>
482505
- <code title="post /v1alpha/eval/benchmarks">client.alpha.benchmarks.<a href="./src/llama_stack_client/resources/alpha/benchmarks.py">register</a>(\*\*<a href="src/llama_stack_client/types/alpha/benchmark_register_params.py">params</a>) -> None</code>
506+
- <code title="delete /v1alpha/eval/benchmarks/{benchmark_id}">client.alpha.benchmarks.<a href="./src/llama_stack_client/resources/alpha/benchmarks.py">unregister</a>(benchmark_id) -> None</code>
483507

484508
## Eval
485509

src/llama_stack_client/_client.py

Lines changed: 39 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,7 @@
4747
models,
4848
routes,
4949
safety,
50+
batches,
5051
inspect,
5152
prompts,
5253
scoring,
@@ -67,6 +68,7 @@
6768
from .resources.tools import ToolsResource, AsyncToolsResource
6869
from .resources.routes import RoutesResource, AsyncRoutesResource
6970
from .resources.safety import SafetyResource, AsyncSafetyResource
71+
from .resources.batches import BatchesResource, AsyncBatchesResource
7072
from .resources.inspect import InspectResource, AsyncInspectResource
7173
from .resources.scoring import ScoringResource, AsyncScoringResource
7274
from .resources.shields import ShieldsResource, AsyncShieldsResource
@@ -79,11 +81,11 @@
7981
from .resources.alpha.alpha import AlphaResource, AsyncAlphaResource
8082
from .resources.completions import CompletionsResource, AsyncCompletionsResource
8183
from .resources.moderations import ModerationsResource, AsyncModerationsResource
84+
from .resources.tool_runtime import ToolRuntimeResource, AsyncToolRuntimeResource
8285
from .resources.models.models import ModelsResource, AsyncModelsResource
8386
from .resources.prompts.prompts import PromptsResource, AsyncPromptsResource
8487
from .resources.scoring_functions import ScoringFunctionsResource, AsyncScoringFunctionsResource
8588
from .resources.responses.responses import ResponsesResource, AsyncResponsesResource
86-
from .resources.tool_runtime.tool_runtime import ToolRuntimeResource, AsyncToolRuntimeResource
8789
from .resources.conversations.conversations import ConversationsResource, AsyncConversationsResource
8890
from .resources.vector_stores.vector_stores import VectorStoresResource, AsyncVectorStoresResource
8991

@@ -282,6 +284,12 @@ def files(self) -> FilesResource:
282284

283285
return FilesResource(self)
284286

287+
@cached_property
288+
def batches(self) -> BatchesResource:
289+
from .resources.batches import BatchesResource
290+
291+
return BatchesResource(self)
292+
285293
@cached_property
286294
def alpha(self) -> AlphaResource:
287295
from .resources.alpha import AlphaResource
@@ -592,6 +600,12 @@ def files(self) -> AsyncFilesResource:
592600

593601
return AsyncFilesResource(self)
594602

603+
@cached_property
604+
def batches(self) -> AsyncBatchesResource:
605+
from .resources.batches import AsyncBatchesResource
606+
607+
return AsyncBatchesResource(self)
608+
595609
@cached_property
596610
def alpha(self) -> AsyncAlphaResource:
597611
from .resources.alpha import AsyncAlphaResource
@@ -851,6 +865,12 @@ def files(self) -> files.FilesResourceWithRawResponse:
851865

852866
return FilesResourceWithRawResponse(self._client.files)
853867

868+
@cached_property
869+
def batches(self) -> batches.BatchesResourceWithRawResponse:
870+
from .resources.batches import BatchesResourceWithRawResponse
871+
872+
return BatchesResourceWithRawResponse(self._client.batches)
873+
854874
@cached_property
855875
def alpha(self) -> alpha.AlphaResourceWithRawResponse:
856876
from .resources.alpha import AlphaResourceWithRawResponse
@@ -996,6 +1016,12 @@ def files(self) -> files.AsyncFilesResourceWithRawResponse:
9961016

9971017
return AsyncFilesResourceWithRawResponse(self._client.files)
9981018

1019+
@cached_property
1020+
def batches(self) -> batches.AsyncBatchesResourceWithRawResponse:
1021+
from .resources.batches import AsyncBatchesResourceWithRawResponse
1022+
1023+
return AsyncBatchesResourceWithRawResponse(self._client.batches)
1024+
9991025
@cached_property
10001026
def alpha(self) -> alpha.AsyncAlphaResourceWithRawResponse:
10011027
from .resources.alpha import AsyncAlphaResourceWithRawResponse
@@ -1141,6 +1167,12 @@ def files(self) -> files.FilesResourceWithStreamingResponse:
11411167

11421168
return FilesResourceWithStreamingResponse(self._client.files)
11431169

1170+
@cached_property
1171+
def batches(self) -> batches.BatchesResourceWithStreamingResponse:
1172+
from .resources.batches import BatchesResourceWithStreamingResponse
1173+
1174+
return BatchesResourceWithStreamingResponse(self._client.batches)
1175+
11441176
@cached_property
11451177
def alpha(self) -> alpha.AlphaResourceWithStreamingResponse:
11461178
from .resources.alpha import AlphaResourceWithStreamingResponse
@@ -1286,6 +1318,12 @@ def files(self) -> files.AsyncFilesResourceWithStreamingResponse:
12861318

12871319
return AsyncFilesResourceWithStreamingResponse(self._client.files)
12881320

1321+
@cached_property
1322+
def batches(self) -> batches.AsyncBatchesResourceWithStreamingResponse:
1323+
from .resources.batches import AsyncBatchesResourceWithStreamingResponse
1324+
1325+
return AsyncBatchesResourceWithStreamingResponse(self._client.batches)
1326+
12891327
@cached_property
12901328
def alpha(self) -> alpha.AsyncAlphaResourceWithStreamingResponse:
12911329
from .resources.alpha import AsyncAlphaResourceWithStreamingResponse

src/llama_stack_client/resources/__init__.py

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -70,6 +70,14 @@
7070
SafetyResourceWithStreamingResponse,
7171
AsyncSafetyResourceWithStreamingResponse,
7272
)
73+
from .batches import (
74+
BatchesResource,
75+
AsyncBatchesResource,
76+
BatchesResourceWithRawResponse,
77+
AsyncBatchesResourceWithRawResponse,
78+
BatchesResourceWithStreamingResponse,
79+
AsyncBatchesResourceWithStreamingResponse,
80+
)
7381
from .inspect import (
7482
InspectResource,
7583
AsyncInspectResource,
@@ -318,6 +326,12 @@
318326
"AsyncFilesResourceWithRawResponse",
319327
"FilesResourceWithStreamingResponse",
320328
"AsyncFilesResourceWithStreamingResponse",
329+
"BatchesResource",
330+
"AsyncBatchesResource",
331+
"BatchesResourceWithRawResponse",
332+
"AsyncBatchesResourceWithRawResponse",
333+
"BatchesResourceWithStreamingResponse",
334+
"AsyncBatchesResourceWithStreamingResponse",
321335
"AlphaResource",
322336
"AsyncAlphaResource",
323337
"AlphaResourceWithRawResponse",

0 commit comments

Comments (0)