From d1c8f4da8db6721ccec8f5e9b04f3ed9ab8cc713 Mon Sep 17 00:00:00 2001 From: SSharma-10 Date: Wed, 22 Apr 2026 16:51:19 +0530 Subject: [PATCH] genAI Changes --- src/pydo/aio/operations/_operations.py | 24240 ++++++++++++++++++++--- src/pydo/operations/_operations.py | 20954 +++++++++++++++++++- 2 files changed, 41504 insertions(+), 3690 deletions(-) diff --git a/src/pydo/aio/operations/_operations.py b/src/pydo/aio/operations/_operations.py index 5a3d9016..6b344c01 100644 --- a/src/pydo/aio/operations/_operations.py +++ b/src/pydo/aio/operations/_operations.py @@ -269,6 +269,8 @@ build_genai_create_knowledge_base_data_source_request, build_genai_create_knowledge_base_request, build_genai_create_model_api_key_request, + build_genai_create_model_eval_dataset_upload_presigned_urls_request, + build_genai_create_model_evaluation_run_request, build_genai_create_oauth2_dropbox_tokens_request, build_genai_create_openai_api_key_request, build_genai_create_scheduled_indexing_request, @@ -276,6 +278,7 @@ build_genai_delete_agent_api_key_request, build_genai_delete_agent_request, build_genai_delete_anthropic_api_key_request, + build_genai_delete_custom_model_request, build_genai_delete_knowledge_base_data_source_request, build_genai_delete_knowledge_base_request, build_genai_delete_model_api_key_request, @@ -290,6 +293,7 @@ build_genai_get_agent_request, build_genai_get_agent_usage_request, build_genai_get_anthropic_api_key_request, + build_genai_get_evaluation_dataset_download_url_request, build_genai_get_evaluation_run_prompt_results_request, build_genai_get_evaluation_run_request, build_genai_get_evaluation_run_results_request, @@ -297,10 +301,13 @@ build_genai_get_indexing_job_details_signed_url_request, build_genai_get_indexing_job_request, build_genai_get_knowledge_base_request, + build_genai_get_model_evaluation_run_request, + build_genai_get_model_evaluation_run_results_download_url_request, build_genai_get_oauth2_url_request, build_genai_get_openai_api_key_request, 
build_genai_get_scheduled_indexing_request, build_genai_get_workspace_request, + build_genai_import_custom_model_request, build_genai_list_agent_api_keys_request, build_genai_list_agent_versions_request, build_genai_list_agents_by_anthropic_key_request, @@ -308,6 +315,7 @@ build_genai_list_agents_by_workspace_request, build_genai_list_agents_request, build_genai_list_anthropic_api_keys_request, + build_genai_list_custom_models_request, build_genai_list_datacenter_regions_request, build_genai_list_evaluation_metrics_request, build_genai_list_evaluation_runs_by_test_case_request, @@ -319,6 +327,8 @@ build_genai_list_knowledge_base_data_sources_request, build_genai_list_knowledge_bases_request, build_genai_list_model_api_keys_request, + build_genai_list_model_evaluation_metrics_request, + build_genai_list_model_evaluation_runs_request, build_genai_list_models_request, build_genai_list_openai_api_keys_request, build_genai_list_workspaces_request, @@ -333,6 +343,7 @@ build_genai_update_agents_workspace_request, build_genai_update_anthropic_api_key_request, build_genai_update_attached_agent_request, + build_genai_update_custom_model_metadata_request, build_genai_update_evaluation_test_case_request, build_genai_update_knowledge_base_data_source_request, build_genai_update_knowledge_base_request, @@ -200954,6 +200965,24 @@ async def list_agents( "max_tokens": 0, # Optional. Specifies the maximum number of tokens the model can process in a single input or output, set as a number between 1 and 512. This determines the length of each response. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of + allowed tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional + additional headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. 
+ "server_url": "str" # Optional. The URL of + the MCP server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -200965,8 +200994,27 @@ async def list_agents( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -200979,14 +201027,59 @@ async def list_agents( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. 
+ ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -201004,11 +201097,52 @@ async def list_agents( "patch": 0 # Optional. Patch version number. } }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level + fallback models. + ], + "policies": [ + { + "custom_task": { + "description": "str", + # Optional. Short task description. + "name": "str" # + Optional. Task name. + }, + "models": [ + "str" # Optional. + Models assigned to the task. + ], + "selection_policy": { + "prefer": "str" # + Optional. One of: none, cheapest, fastest. + }, + "task_slug": "str" # + Optional. Task slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. + Creation date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the + router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. + Last modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. 
Agent name. "project_id": "str", # Optional. The DigitalOcean project ID associated with the agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort + for the agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: @@ -201160,6 +201294,12 @@ async def list_agents( List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. + Whether reranking is enabled for retrieval. + "model": "str" # Optional. + Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -201187,8 +201327,27 @@ async def list_agents( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -201202,16 +201361,64 @@ async def list_agents( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. 
+ "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -201255,6 +201462,8 @@ async def list_agents( agent template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token + budget for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. 
Defines the cumulative probability threshold for word selection, specified as a number between 0 and 1. Higher values allow for more diverse outputs, while lower values ensure @@ -201415,18 +201624,39 @@ async def create_agent( "str" # Optional. Ids of the knowledge base(s) to attach to the agent. ], + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed tool + names to expose from this server. + ], + "authorization": "str", # Optional. Optional authorization + header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional headers + to send to the MCP server. + }, + "server_label": "str", # Optional. A label identifying this + MCP server. + "server_url": "str" # Optional. The URL of the MCP server. + } + ], "model_provider_key_uuid": "str", # Optional. + "model_router_uuid": "str", # Optional. "model_uuid": "str", # Optional. Identifier for the foundation model. "name": "str", # Optional. Agent name. "open_ai_key_uuid": "str", # Optional. Optional OpenAI API key ID to use with OpenAI models. "project_id": "str", # Optional. The id of the DigitalOcean project this agent will belong to. + "reasoning_effort": "str", # Optional. "region": "str", # Optional. The DigitalOcean region to deploy your agent in. + "router_preset_slug": "str", # Optional. "tags": [ "str" # Optional. Agent tag to organize related resources. ], + "thinking_token_budget": 0, # Optional. "workspace_uuid": "str" # Optional. Identifier for the workspace. } @@ -201667,6 +201897,12 @@ async def create_agent( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. 
@@ -201693,6 +201929,24 @@ async def create_agent( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -201701,8 +201955,27 @@ async def create_agent( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -201714,14 +201987,56 @@ async def create_agent( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. 
Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -201756,8 +202071,27 @@ async def create_agent( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. 
Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -201771,16 +202105,64 @@ async def create_agent( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). 
+ ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -201809,6 +202191,44 @@ async def create_agent( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -201828,8 +202248,27 @@ async def create_agent( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. 
Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -201843,16 +202282,64 @@ async def create_agent( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. 
+ Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -201884,6 +202371,8 @@ async def create_agent( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -202028,6 +202517,12 @@ async def create_agent( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -202055,8 +202550,27 @@ async def create_agent( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). 
+ } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -202069,14 +202583,59 @@ async def create_agent( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. 
Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -202116,6 +202675,8 @@ async def create_agent( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -202155,6 +202716,13 @@ async def create_agent( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -202185,6 +202753,15 @@ async def create_agent( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -202198,7 +202775,8 @@ async def create_agent( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". 
+ "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -202522,6 +203100,12 @@ async def create_agent( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -202548,6 +203132,24 @@ async def create_agent( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -202556,8 +203158,27 @@ async def create_agent( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. 
The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -202569,14 +203190,56 @@ async def create_agent( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). 
+ "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -202611,8 +203274,27 @@ async def create_agent( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -202626,16 +203308,64 @@ async def create_agent( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. 
Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -202664,6 +203394,44 @@ async def create_agent( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. 
+ "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -202683,8 +203451,27 @@ async def create_agent( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -202698,16 +203485,64 @@ async def create_agent( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. 
+ ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -202739,6 +203574,8 @@ async def create_agent( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -202883,6 +203720,12 @@ async def create_agent( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -202910,8 +203753,27 @@ async def create_agent( "uuid": "str" # Optional. Agreement Description. 
}, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -202924,14 +203786,59 @@ async def create_agent( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. 
Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -202971,6 +203878,8 @@ async def create_agent( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -203010,6 +203919,13 @@ async def create_agent( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -203040,6 +203956,15 @@ async def create_agent( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". 
Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -203053,7 +203978,8 @@ async def create_agent( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -203147,18 +204073,39 @@ async def create_agent( "str" # Optional. Ids of the knowledge base(s) to attach to the agent. ], + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed tool + names to expose from this server. + ], + "authorization": "str", # Optional. Optional authorization + header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional headers + to send to the MCP server. + }, + "server_label": "str", # Optional. A label identifying this + MCP server. + "server_url": "str" # Optional. The URL of the MCP server. + } + ], "model_provider_key_uuid": "str", # Optional. + "model_router_uuid": "str", # Optional. "model_uuid": "str", # Optional. Identifier for the foundation model. "name": "str", # Optional. Agent name. "open_ai_key_uuid": "str", # Optional. Optional OpenAI API key ID to use with OpenAI models. "project_id": "str", # Optional. The id of the DigitalOcean project this agent will belong to. + "reasoning_effort": "str", # Optional. "region": "str", # Optional. The DigitalOcean region to deploy your agent in. + "router_preset_slug": "str", # Optional. "tags": [ "str" # Optional. 
Agent tag to organize related resources. ], + "thinking_token_budget": 0, # Optional. "workspace_uuid": "str" # Optional. Identifier for the workspace. } @@ -203399,6 +204346,12 @@ async def create_agent( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -203425,6 +204378,24 @@ async def create_agent( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -203433,8 +204404,27 @@ async def create_agent( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. 
The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -203446,14 +204436,56 @@ async def create_agent( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). 
+ "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -203488,8 +204520,27 @@ async def create_agent( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -203503,16 +204554,64 @@ async def create_agent( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. 
Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -203541,6 +204640,44 @@ async def create_agent( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. 
+ "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -203560,8 +204697,27 @@ async def create_agent( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -203575,16 +204731,64 @@ async def create_agent( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. 
+ ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -203616,6 +204820,8 @@ async def create_agent( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -203760,6 +204966,12 @@ async def create_agent( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -203787,8 +204999,27 @@ async def create_agent( "uuid": "str" # Optional. Agreement Description. 
}, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -203801,14 +205032,59 @@ async def create_agent( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. 
Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -203848,6 +205124,8 @@ async def create_agent( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -203887,6 +205165,13 @@ async def create_agent( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -203917,6 +205202,15 @@ async def create_agent( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". 
Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -203930,7 +205224,8 @@ async def create_agent( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -205287,6 +206582,12 @@ async def attach_agent_function( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -205313,6 +206614,24 @@ async def attach_agent_function( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. 
Agreement @@ -205321,8 +206640,27 @@ async def attach_agent_function( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -205334,14 +206672,56 @@ async def attach_agent_function( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. 
String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -205376,8 +206756,27 @@ async def attach_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -205391,16 +206790,64 @@ async def attach_agent_function( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. 
+ Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -205429,6 +206876,44 @@ async def attach_agent_function( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. 
}, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -205448,8 +206933,27 @@ async def attach_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -205463,16 +206967,64 @@ async def attach_agent_function( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. 
+ Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -205504,6 +207056,8 @@ async def attach_agent_function( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. 
Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -205648,6 +207202,12 @@ async def attach_agent_function( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -205675,8 +207235,27 @@ async def attach_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -205689,14 +207268,59 @@ async def attach_agent_function( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. 
Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -205736,6 +207360,8 @@ async def attach_agent_function( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. 
Access your agent under this url. @@ -205775,6 +207401,13 @@ async def attach_agent_function( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -205805,6 +207438,15 @@ async def attach_agent_function( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -205818,7 +207460,8 @@ async def attach_agent_function( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -206145,6 +207788,12 @@ async def attach_agent_function( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. 
+ }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -206171,6 +207820,24 @@ async def attach_agent_function( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -206179,8 +207846,27 @@ async def attach_agent_function( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -206192,14 +207878,56 @@ async def attach_agent_function( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). 
"metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -206234,8 +207962,27 @@ async def attach_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. 
High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -206249,16 +207996,64 @@ async def attach_agent_function( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. 
"max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -206287,6 +208082,44 @@ async def attach_agent_function( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -206306,8 +208139,27 @@ async def attach_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. 
tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -206321,16 +208173,64 @@ async def attach_agent_function( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). 
+ "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -206362,6 +208262,8 @@ async def attach_agent_function( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -206506,6 +208408,12 @@ async def attach_agent_function( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -206533,8 +208441,27 @@ async def attach_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. 
+ Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -206547,14 +208474,59 @@ async def attach_agent_function( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. 
Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -206594,6 +208566,8 @@ async def attach_agent_function( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -206633,6 +208607,13 @@ async def attach_agent_function( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -206663,6 +208644,15 @@ async def attach_agent_function( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. 
@@ -206676,7 +208666,8 @@ async def attach_agent_function( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -207013,6 +209004,12 @@ async def attach_agent_function( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -207039,6 +209036,24 @@ async def attach_agent_function( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -207047,8 +209062,27 @@ async def attach_agent_function( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. 
Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -207060,14 +209094,56 @@ async def attach_agent_function( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. 
Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -207102,8 +209178,27 @@ async def attach_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -207117,16 +209212,64 @@ async def attach_agent_function( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. 
"parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -207155,6 +209298,44 @@ async def attach_agent_function( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. 
Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -207174,8 +209355,27 @@ async def attach_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -207189,16 +209389,64 @@ async def attach_agent_function( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. 
Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -207230,6 +209478,8 @@ async def attach_agent_function( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -207374,6 +209624,12 @@ async def attach_agent_function( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. 
"region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -207401,8 +209657,27 @@ async def attach_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -207415,14 +209690,59 @@ async def attach_agent_function( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". 
Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -207462,6 +209782,8 @@ async def attach_agent_function( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -207501,6 +209823,13 @@ async def attach_agent_function( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". 
Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -207531,6 +209860,15 @@ async def attach_agent_function( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -207544,7 +209882,8 @@ async def attach_agent_function( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -207984,6 +210323,12 @@ async def update_agent_function( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -208010,6 +210355,24 @@ async def update_agent_function( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. 
Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -208018,8 +210381,27 @@ async def update_agent_function( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -208031,14 +210413,56 @@ async def update_agent_function( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. 
"parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -208073,8 +210497,27 @@ async def update_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. 
+ input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -208088,16 +210531,64 @@ async def update_agent_function( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. 
Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -208126,6 +210617,44 @@ async def update_agent_function( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -208145,8 +210674,27 @@ async def update_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). 
+ ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -208160,16 +210708,64 @@ async def update_agent_function( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. 
Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -208201,6 +210797,8 @@ async def update_agent_function( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -208345,6 +210943,12 @@ async def update_agent_function( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -208372,8 +210976,27 @@ async def update_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. 
Internally @@ -208386,14 +211009,59 @@ async def update_agent_function( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. 
Last modified. "upload_complete": bool, # Optional. Model has been @@ -208433,6 +211101,8 @@ async def update_agent_function( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -208472,6 +211142,13 @@ async def update_agent_function( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -208502,6 +211179,15 @@ async def update_agent_function( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -208515,7 +211201,8 @@ async def update_agent_function( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". 
"metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -208845,6 +211532,12 @@ async def update_agent_function( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -208871,6 +211564,24 @@ async def update_agent_function( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -208879,8 +211590,27 @@ async def update_agent_function( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). 
+ } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -208892,14 +211622,56 @@ async def update_agent_function( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. 
Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -208934,8 +211706,27 @@ async def update_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -208949,16 +211740,64 @@ async def update_agent_function( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. 
+ ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -208987,6 +211826,44 @@ async def update_agent_function( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. 
+ }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -209006,8 +211883,27 @@ async def update_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -209021,16 +211917,64 @@ async def update_agent_function( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. 
String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -209062,6 +212006,8 @@ async def update_agent_function( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -209206,6 +212152,12 @@ async def update_agent_function( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -209233,8 +212185,27 @@ async def update_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. 
Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -209247,14 +212218,59 @@ async def update_agent_function( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. 
Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -209294,6 +212310,8 @@ async def update_agent_function( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -209333,6 +212351,13 @@ async def update_agent_function( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -209363,6 +212388,15 @@ async def update_agent_function( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. 
For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -209376,7 +212410,8 @@ async def update_agent_function( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -209717,6 +212752,12 @@ async def update_agent_function( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -209743,6 +212784,24 @@ async def update_agent_function( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -209751,8 +212810,27 @@ async def update_agent_function( "url": "str", # Optional. Agreement Description. 
"uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -209764,14 +212842,56 @@ async def update_agent_function( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). 
+ "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -209806,8 +212926,27 @@ async def update_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -209821,16 +212960,64 @@ async def update_agent_function( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. 
+ "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -209859,6 +213046,44 @@ async def update_agent_function( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. 
Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -209878,8 +213103,27 @@ async def update_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -209893,16 +213137,64 @@ async def update_agent_function( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. 
+ Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -209934,6 +213226,8 @@ async def update_agent_function( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. 
Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -210078,6 +213372,12 @@ async def update_agent_function( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -210105,8 +213405,27 @@ async def update_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -210119,14 +213438,59 @@ async def update_agent_function( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. 
Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -210166,6 +213530,8 @@ async def update_agent_function( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -210205,6 +213571,13 @@ async def update_agent_function( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. 
Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -210235,6 +213608,15 @@ async def update_agent_function( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -210248,7 +213630,8 @@ async def update_agent_function( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -210663,6 +214046,12 @@ async def detach_agent_function( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -210689,6 +214078,24 @@ async def detach_agent_function( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. 
+ "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -210697,8 +214104,27 @@ async def detach_agent_function( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -210710,14 +214136,56 @@ async def detach_agent_function( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. 
+ "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -210752,8 +214220,27 @@ async def detach_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. 
+ "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -210767,16 +214254,64 @@ async def detach_agent_function( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). 
+ "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -210805,6 +214340,44 @@ async def detach_agent_function( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -210824,8 +214397,27 @@ async def detach_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. 
+ Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -210839,16 +214431,64 @@ async def detach_agent_function( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. 
+ Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -210880,6 +214520,8 @@ async def detach_agent_function( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -211024,6 +214666,12 @@ async def detach_agent_function( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -211051,8 +214699,27 @@ async def detach_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. 
"inference_version": "str", # Optional. Internally @@ -211065,14 +214732,59 @@ async def detach_agent_function( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). 
"updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -211112,6 +214824,8 @@ async def detach_agent_function( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -211151,6 +214865,13 @@ async def detach_agent_function( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -211181,6 +214902,15 @@ async def detach_agent_function( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -211194,7 +214924,8 @@ async def detach_agent_function( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". 
+ "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -211610,6 +215341,12 @@ async def attach_agent_guardrails( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -211636,6 +215373,24 @@ async def attach_agent_guardrails( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -211644,8 +215399,27 @@ async def attach_agent_guardrails( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). 
+ ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -211657,14 +215431,56 @@ async def attach_agent_guardrails( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. 
Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -211699,8 +215515,27 @@ async def attach_agent_guardrails( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -211714,16 +215549,64 @@ async def attach_agent_guardrails( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". 
Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -211752,6 +215635,44 @@ async def attach_agent_guardrails( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. 
Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -211771,8 +215692,27 @@ async def attach_agent_guardrails( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -211786,16 +215726,64 @@ async def attach_agent_guardrails( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". 
Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -211827,6 +215815,8 @@ async def attach_agent_guardrails( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -211971,6 +215961,12 @@ async def attach_agent_guardrails( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. 
Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -211998,8 +215994,27 @@ async def attach_agent_guardrails( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -212012,14 +216027,59 @@ async def attach_agent_guardrails( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. 
+ ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -212059,6 +216119,8 @@ async def attach_agent_guardrails( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -212098,6 +216160,13 @@ async def attach_agent_guardrails( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. 
The @@ -212128,6 +216197,15 @@ async def attach_agent_guardrails( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -212141,7 +216219,8 @@ async def attach_agent_guardrails( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -212468,6 +216547,12 @@ async def attach_agent_guardrails( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -212494,6 +216579,24 @@ async def attach_agent_guardrails( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. 
Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -212502,8 +216605,27 @@ async def attach_agent_guardrails( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -212515,14 +216637,56 @@ async def attach_agent_guardrails( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". 
Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -212557,8 +216721,27 @@ async def attach_agent_guardrails( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. 
"inference_version": "str", # Optional. @@ -212572,16 +216755,64 @@ async def attach_agent_guardrails( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). 
"updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -212610,6 +216841,44 @@ async def attach_agent_guardrails( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -212629,8 +216898,27 @@ async def attach_agent_guardrails( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. 
"inference_version": "str", # Optional. @@ -212644,16 +216932,64 @@ async def attach_agent_guardrails( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). 
"updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -212685,6 +217021,8 @@ async def attach_agent_guardrails( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -212829,6 +217167,12 @@ async def attach_agent_guardrails( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -212856,8 +217200,27 @@ async def attach_agent_guardrails( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -212870,14 +217233,59 @@ async def attach_agent_guardrails( size limit of model. "kb_min_chunk_size": 0, # Optional. 
Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. 
Model has been @@ -212917,6 +217325,8 @@ async def attach_agent_guardrails( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -212956,6 +217366,13 @@ async def attach_agent_guardrails( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -212986,6 +217403,15 @@ async def attach_agent_guardrails( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -212999,7 +217425,8 @@ async def attach_agent_guardrails( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. 
"metric_value_type": @@ -213332,6 +217759,12 @@ async def attach_agent_guardrails( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -213358,6 +217791,24 @@ async def attach_agent_guardrails( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -213366,8 +217817,27 @@ async def attach_agent_guardrails( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. 
"inference_version": "str", # Optional. Internally used version. @@ -213379,14 +217849,56 @@ async def attach_agent_guardrails( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). 
"updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -213421,8 +217933,27 @@ async def attach_agent_guardrails( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -213436,16 +217967,64 @@ async def attach_agent_guardrails( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. 
+ ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -213474,6 +218053,44 @@ async def attach_agent_guardrails( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. 
+ }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -213493,8 +218110,27 @@ async def attach_agent_guardrails( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -213508,16 +218144,64 @@ async def attach_agent_guardrails( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. 
String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -213549,6 +218233,8 @@ async def attach_agent_guardrails( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -213693,6 +218379,12 @@ async def attach_agent_guardrails( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -213720,8 +218412,27 @@ async def attach_agent_guardrails( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. 
Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -213734,14 +218445,59 @@ async def attach_agent_guardrails( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. 
Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -213781,6 +218537,8 @@ async def attach_agent_guardrails( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -213820,6 +218578,13 @@ async def attach_agent_guardrails( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -213850,6 +218615,15 @@ async def attach_agent_guardrails( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". 
Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -213863,7 +218637,8 @@ async def attach_agent_guardrails( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -214276,6 +219051,12 @@ async def detach_agent_guardrail( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -214302,6 +219083,24 @@ async def detach_agent_guardrail( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. 
Agreement @@ -214310,8 +219109,27 @@ async def detach_agent_guardrail( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -214323,14 +219141,56 @@ async def detach_agent_guardrail( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. 
String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -214365,8 +219225,27 @@ async def detach_agent_guardrail( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -214380,16 +219259,64 @@ async def detach_agent_guardrail( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. 
+ Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -214418,6 +219345,44 @@ async def detach_agent_guardrail( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. 
}, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -214437,8 +219402,27 @@ async def detach_agent_guardrail( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -214452,16 +219436,64 @@ async def detach_agent_guardrail( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. 
+ Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -214493,6 +219525,8 @@ async def detach_agent_guardrail( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. 
Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -214637,6 +219671,12 @@ async def detach_agent_guardrail( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -214664,8 +219704,27 @@ async def detach_agent_guardrail( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -214678,14 +219737,59 @@ async def detach_agent_guardrail( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. 
Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -214725,6 +219829,8 @@ async def detach_agent_guardrail( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. 
Access your agent under this url. @@ -214764,6 +219870,13 @@ async def detach_agent_guardrail( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -214794,6 +219907,15 @@ async def detach_agent_guardrail( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -214807,7 +219929,8 @@ async def detach_agent_guardrail( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -215200,6 +220323,12 @@ async def attach_knowledge_bases(self, agent_uuid: str, **kwargs: Any) -> JSON: "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. 
+ "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -215226,6 +220355,24 @@ async def attach_knowledge_bases(self, agent_uuid: str, **kwargs: Any) -> JSON: "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -215234,8 +220381,27 @@ async def attach_knowledge_bases(self, agent_uuid: str, **kwargs: Any) -> JSON: "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -215247,14 +220413,56 @@ async def attach_knowledge_bases(self, agent_uuid: str, **kwargs: Any) -> JSON: of model. "kb_min_chunk_size": 0, # Optional. 
Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. 
Model has been fully @@ -215289,8 +220497,27 @@ async def attach_knowledge_bases(self, agent_uuid: str, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -215304,16 +220531,64 @@ async def attach_knowledge_bases(self, agent_uuid: str, **kwargs: Any) -> JSON: "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. 
String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -215342,6 +220617,44 @@ async def attach_knowledge_bases(self, agent_uuid: str, **kwargs: Any) -> JSON: "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. 
+ }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -215361,8 +220674,27 @@ async def attach_knowledge_bases(self, agent_uuid: str, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -215376,16 +220708,64 @@ async def attach_knowledge_bases(self, agent_uuid: str, **kwargs: Any) -> JSON: "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. 
Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -215417,6 +220797,8 @@ async def attach_knowledge_bases(self, agent_uuid: str, **kwargs: Any) -> JSON: "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -215561,6 +220943,12 @@ async def attach_knowledge_bases(self, agent_uuid: str, **kwargs: Any) -> JSON: "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. 
+ }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -215588,8 +220976,27 @@ async def attach_knowledge_bases(self, agent_uuid: str, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -215602,14 +221009,59 @@ async def attach_knowledge_bases(self, agent_uuid: str, **kwargs: Any) -> JSON: size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. 
Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -215649,6 +221101,8 @@ async def attach_knowledge_bases(self, agent_uuid: str, **kwargs: Any) -> JSON: template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -215688,6 +221142,13 @@ async def attach_knowledge_bases(self, agent_uuid: str, **kwargs: Any) -> JSON: # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". 
"dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -215718,6 +221179,15 @@ async def attach_knowledge_bases(self, agent_uuid: str, **kwargs: Any) -> JSON: "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -215731,7 +221201,8 @@ async def attach_knowledge_bases(self, agent_uuid: str, **kwargs: Any) -> JSON: "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -216127,6 +221598,12 @@ async def attach_knowledge_base( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -216153,6 +221630,24 @@ async def attach_knowledge_base( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. 
+ ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -216161,8 +221656,27 @@ async def attach_knowledge_base( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -216174,14 +221688,56 @@ async def attach_knowledge_base( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. 
Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -216216,8 +221772,27 @@ async def attach_knowledge_base( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). 
+ ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -216231,16 +221806,64 @@ async def attach_knowledge_base( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. 
Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -216269,6 +221892,44 @@ async def attach_knowledge_base( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -216288,8 +221949,27 @@ async def attach_knowledge_base( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). 
+ ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -216303,16 +221983,64 @@ async def attach_knowledge_base( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. 
Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -216344,6 +222072,8 @@ async def attach_knowledge_base( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -216488,6 +222218,12 @@ async def attach_knowledge_base( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -216515,8 +222251,27 @@ async def attach_knowledge_base( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. 
Internally @@ -216529,14 +222284,59 @@ async def attach_knowledge_base( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. 
Last modified. "upload_complete": bool, # Optional. Model has been @@ -216576,6 +222376,8 @@ async def attach_knowledge_base( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -216615,6 +222417,13 @@ async def attach_knowledge_base( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -216645,6 +222454,15 @@ async def attach_knowledge_base( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -216658,7 +222476,8 @@ async def attach_knowledge_base( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". 
"metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -217055,6 +222874,12 @@ async def detach_knowledge_base( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -217081,6 +222906,24 @@ async def detach_knowledge_base( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -217089,8 +222932,27 @@ async def detach_knowledge_base( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). 
+ } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -217102,14 +222964,56 @@ async def detach_knowledge_base( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. 
Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -217144,8 +223048,27 @@ async def detach_knowledge_base( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -217159,16 +223082,64 @@ async def detach_knowledge_base( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. 
+ ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -217197,6 +223168,44 @@ async def detach_knowledge_base( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. 
+ }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -217216,8 +223225,27 @@ async def detach_knowledge_base( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -217231,16 +223259,64 @@ async def detach_knowledge_base( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. 
String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -217272,6 +223348,8 @@ async def detach_knowledge_base( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -217416,6 +223494,12 @@ async def detach_knowledge_base( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -217443,8 +223527,27 @@ async def detach_knowledge_base( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. 
Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -217457,14 +223560,59 @@ async def detach_knowledge_base( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. 
Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -217504,6 +223652,8 @@ async def detach_knowledge_base( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -217543,6 +223693,13 @@ async def detach_knowledge_base( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -217573,6 +223730,15 @@ async def detach_knowledge_base( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. 
For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -217586,7 +223752,8 @@ async def detach_knowledge_base( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -218628,6 +224795,12 @@ async def get_agent(self, uuid: str, **kwargs: Any) -> JSON: "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -218654,6 +224827,24 @@ async def get_agent(self, uuid: str, **kwargs: Any) -> JSON: "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. 
Agreement @@ -218662,8 +224853,27 @@ async def get_agent(self, uuid: str, **kwargs: Any) -> JSON: "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -218675,14 +224885,56 @@ async def get_agent(self, uuid: str, **kwargs: Any) -> JSON: of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. 
+ ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -218717,8 +224969,27 @@ async def get_agent(self, uuid: str, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -218732,16 +225003,64 @@ async def get_agent(self, uuid: str, **kwargs: Any) -> JSON: "kb_min_chunk_size": 0, # Optional. 
Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. 
Model @@ -218770,6 +225089,44 @@ async def get_agent(self, uuid: str, **kwargs: Any) -> JSON: "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -218789,8 +225146,27 @@ async def get_agent(self, uuid: str, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. 
@@ -218804,16 +225180,64 @@ async def get_agent(self, uuid: str, **kwargs: Any) -> JSON: "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. 
Last modified. "upload_complete": bool, # Optional. Model @@ -218845,6 +225269,8 @@ async def get_agent(self, uuid: str, **kwargs: Any) -> JSON: "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -218989,6 +225415,12 @@ async def get_agent(self, uuid: str, **kwargs: Any) -> JSON: "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -219016,8 +225448,27 @@ async def get_agent(self, uuid: str, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -219030,14 +225481,59 @@ async def get_agent(self, uuid: str, **kwargs: Any) -> JSON: size limit of model. "kb_min_chunk_size": 0, # Optional. 
Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. 
Model has been @@ -219077,6 +225573,8 @@ async def get_agent(self, uuid: str, **kwargs: Any) -> JSON: template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -219116,6 +225614,13 @@ async def get_agent(self, uuid: str, **kwargs: Any) -> JSON: # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -219146,6 +225651,15 @@ async def get_agent(self, uuid: str, **kwargs: Any) -> JSON: "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -219159,7 +225673,8 @@ async def get_agent(self, uuid: str, **kwargs: Any) -> JSON: "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". 
+ "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -219335,6 +225850,8 @@ async def update_agent( ], "anthropic_key_uuid": "str", # Optional. Optional anthropic key uuid for use with anthropic models. + "clear_mcp_servers": bool, # Optional. When true, removes all MCP servers + from the agent. Use this instead of sending an empty mcp_servers array. "conversation_logs_enabled": bool, # Optional. Optional update of conversation logs enabled. "description": "str", # Optional. Agent description. @@ -219347,8 +225864,26 @@ async def update_agent( "max_tokens": 0, # Optional. Specifies the maximum number of tokens the model can process in a single input or output, set as a number between 1 and 512. This determines the length of each response. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed tool + names to expose from this server. + ], + "authorization": "str", # Optional. Optional authorization + header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional headers + to send to the MCP server. + }, + "server_label": "str", # Optional. A label identifying this + MCP server. + "server_url": "str" # Optional. The URL of the MCP server. + } + ], "model_provider_key_uuid": "str", # Optional. Optional Model Provider uuid for use with provider models. + "model_router_uuid": "str", # Optional. "model_uuid": "str", # Optional. Identifier for the foundation model. "name": "str", # Optional. Agent name. "open_ai_key_uuid": "str", # Optional. Optional OpenAI key uuid for use with @@ -219356,6 +225891,7 @@ async def update_agent( "project_id": "str", # Optional. The id of the DigitalOcean project this agent will belong to. "provide_citations": bool, # Optional. + "reasoning_effort": "str", # Optional. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. 
Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The retrieval method is unknown * RETRIEVAL_METHOD_REWRITE: The retrieval method is rewrite * @@ -219365,12 +225901,14 @@ async def update_agent( "RETRIEVAL_METHOD_UNKNOWN", "RETRIEVAL_METHOD_REWRITE", "RETRIEVAL_METHOD_STEP_BACK", "RETRIEVAL_METHOD_SUB_QUERIES", and "RETRIEVAL_METHOD_NONE". + "router_preset_slug": "str", # Optional. "tags": [ "str" # Optional. A set of abitrary tags to organize your agent. ], "temperature": 0.0, # Optional. Controls the model"u2019s creativity, specified as a number between 0 and 1. Lower values produce more predictable and conservative responses, while higher values encourage creativity and variation. + "thinking_token_budget": 0, # Optional. "top_p": 0.0, # Optional. Defines the cumulative probability threshold for word selection, specified as a number between 0 and 1. Higher values allow for more diverse outputs, while lower values ensure focused and coherent responses. @@ -219614,6 +226152,12 @@ async def update_agent( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -219640,6 +226184,24 @@ async def update_agent( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. 
+ "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -219648,8 +226210,27 @@ async def update_agent( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -219661,14 +226242,56 @@ async def update_agent( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. 
Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -219703,8 +226326,27 @@ async def update_agent( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -219718,16 +226360,64 @@ async def update_agent( "kb_min_chunk_size": 0, # Optional. 
Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. 
Model @@ -219756,6 +226446,44 @@ async def update_agent( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -219775,8 +226503,27 @@ async def update_agent( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -219790,16 +226537,64 @@ async def update_agent( "kb_min_chunk_size": 0, # Optional. 
Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. 
Model @@ -219831,6 +226626,8 @@ async def update_agent( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -219975,6 +226772,12 @@ async def update_agent( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -220002,8 +226805,27 @@ async def update_agent( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -220016,14 +226838,59 @@ async def update_agent( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). 
"metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -220063,6 +226930,8 @@ async def update_agent( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. 
"updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -220102,6 +226971,13 @@ async def update_agent( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -220132,6 +227008,15 @@ async def update_agent( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -220145,7 +227030,8 @@ async def update_agent( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -220472,6 +227358,12 @@ async def update_agent( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. 
+ "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -220498,6 +227390,24 @@ async def update_agent( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -220506,8 +227416,27 @@ async def update_agent( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -220519,14 +227448,56 @@ async def update_agent( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. 
Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -220561,8 +227532,27 @@ async def update_agent( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. 
Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -220576,16 +227566,64 @@ async def update_agent( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. 
+ Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -220614,6 +227652,44 @@ async def update_agent( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -220633,8 +227709,27 @@ async def update_agent( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. 
+ "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -220648,16 +227743,64 @@ async def update_agent( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. 
"max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -220689,6 +227832,8 @@ async def update_agent( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -220833,6 +227978,12 @@ async def update_agent( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -220860,8 +228011,27 @@ async def update_agent( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. 
+ Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -220874,14 +228044,59 @@ async def update_agent( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. 
Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -220921,6 +228136,8 @@ async def update_agent( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -220960,6 +228177,13 @@ async def update_agent( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -220990,6 +228214,15 @@ async def update_agent( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. 
@@ -221003,7 +228236,8 @@ async def update_agent( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -221094,6 +228328,8 @@ async def update_agent( ], "anthropic_key_uuid": "str", # Optional. Optional anthropic key uuid for use with anthropic models. + "clear_mcp_servers": bool, # Optional. When true, removes all MCP servers + from the agent. Use this instead of sending an empty mcp_servers array. "conversation_logs_enabled": bool, # Optional. Optional update of conversation logs enabled. "description": "str", # Optional. Agent description. @@ -221106,8 +228342,26 @@ async def update_agent( "max_tokens": 0, # Optional. Specifies the maximum number of tokens the model can process in a single input or output, set as a number between 1 and 512. This determines the length of each response. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed tool + names to expose from this server. + ], + "authorization": "str", # Optional. Optional authorization + header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional headers + to send to the MCP server. + }, + "server_label": "str", # Optional. A label identifying this + MCP server. + "server_url": "str" # Optional. The URL of the MCP server. + } + ], "model_provider_key_uuid": "str", # Optional. Optional Model Provider uuid for use with provider models. + "model_router_uuid": "str", # Optional. "model_uuid": "str", # Optional. Identifier for the foundation model. "name": "str", # Optional. Agent name. "open_ai_key_uuid": "str", # Optional. Optional OpenAI key uuid for use with @@ -221115,6 +228369,7 @@ async def update_agent( "project_id": "str", # Optional. 
The id of the DigitalOcean project this agent will belong to. "provide_citations": bool, # Optional. + "reasoning_effort": "str", # Optional. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The retrieval method is unknown * RETRIEVAL_METHOD_REWRITE: The retrieval method is rewrite * @@ -221124,12 +228379,14 @@ async def update_agent( "RETRIEVAL_METHOD_UNKNOWN", "RETRIEVAL_METHOD_REWRITE", "RETRIEVAL_METHOD_STEP_BACK", "RETRIEVAL_METHOD_SUB_QUERIES", and "RETRIEVAL_METHOD_NONE". + "router_preset_slug": "str", # Optional. "tags": [ "str" # Optional. A set of abitrary tags to organize your agent. ], "temperature": 0.0, # Optional. Controls the model"u2019s creativity, specified as a number between 0 and 1. Lower values produce more predictable and conservative responses, while higher values encourage creativity and variation. + "thinking_token_budget": 0, # Optional. "top_p": 0.0, # Optional. Defines the cumulative probability threshold for word selection, specified as a number between 0 and 1. Higher values allow for more diverse outputs, while lower values ensure focused and coherent responses. @@ -221373,6 +228630,12 @@ async def update_agent( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -221399,6 +228662,24 @@ async def update_agent( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. 
Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -221407,8 +228688,27 @@ async def update_agent( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -221420,14 +228720,56 @@ async def update_agent( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. 
"provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -221462,8 +228804,27 @@ async def update_agent( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. 
/chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -221477,16 +228838,64 @@ async def update_agent( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. 
Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -221515,6 +228924,44 @@ async def update_agent( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -221534,8 +228981,27 @@ async def update_agent( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. 
Internally used name. "inference_version": "str", # Optional. @@ -221549,16 +229015,64 @@ async def update_agent( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). 
"updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -221590,6 +229104,8 @@ async def update_agent( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -221734,6 +229250,12 @@ async def update_agent( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -221761,8 +229283,27 @@ async def update_agent( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -221775,14 +229316,59 @@ async def update_agent( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. 
+ "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -221822,6 +229408,8 @@ async def update_agent( template's last updated date. "uuid": "str" # Optional. Unique id. 
}, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -221861,6 +229449,13 @@ async def update_agent( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -221891,6 +229486,15 @@ async def update_agent( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -221904,7 +229508,8 @@ async def update_agent( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -222312,6 +229917,12 @@ async def delete_agent(self, uuid: str, **kwargs: Any) -> JSON: "name": "str", # Optional. Name of knowledge base. 
"project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -222338,6 +229949,24 @@ async def delete_agent(self, uuid: str, **kwargs: Any) -> JSON: "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -222346,8 +229975,27 @@ async def delete_agent(self, uuid: str, **kwargs: Any) -> JSON: "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. 
@@ -222359,14 +230007,56 @@ async def delete_agent(self, uuid: str, **kwargs: Any) -> JSON: of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. 
Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -222401,8 +230091,27 @@ async def delete_agent(self, uuid: str, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -222416,16 +230125,64 @@ async def delete_agent(self, uuid: str, **kwargs: Any) -> JSON: "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. 
+ ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -222454,6 +230211,44 @@ async def delete_agent(self, uuid: str, **kwargs: Any) -> JSON: "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. 
+ "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -222473,8 +230268,27 @@ async def delete_agent(self, uuid: str, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -222488,16 +230302,64 @@ async def delete_agent(self, uuid: str, **kwargs: Any) -> JSON: "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. 
Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -222529,6 +230391,8 @@ async def delete_agent(self, uuid: str, **kwargs: Any) -> JSON: "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -222673,6 +230537,12 @@ async def delete_agent(self, uuid: str, **kwargs: Any) -> JSON: "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. 
Tags to organize related resources. @@ -222700,8 +230570,27 @@ async def delete_agent(self, uuid: str, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -222714,14 +230603,59 @@ async def delete_agent(self, uuid: str, **kwargs: Any) -> JSON: size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. 
+ ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -222761,6 +230695,8 @@ async def delete_agent(self, uuid: str, **kwargs: Any) -> JSON: template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -222800,6 +230736,13 @@ async def delete_agent(self, uuid: str, **kwargs: Any) -> JSON: # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. 
"file_size": "str", # Optional. The @@ -222830,6 +230773,15 @@ async def delete_agent(self, uuid: str, **kwargs: Any) -> JSON: "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -222843,7 +230795,8 @@ async def delete_agent(self, uuid: str, **kwargs: Any) -> JSON: "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -223256,6 +231209,12 @@ async def get_agent_children(self, uuid: str, **kwargs: Any) -> JSON: "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -223283,6 +231242,24 @@ async def get_agent_children(self, uuid: str, **kwargs: Any) -> JSON: stream. }, "max_tokens": 0, # Optional. Child agents. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of + allowed tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. 
+ "headers": { + "str": "str" # Optional. Optional + additional headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of + the MCP server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -223294,8 +231271,27 @@ async def get_agent_children(self, uuid: str, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -223308,14 +231304,59 @@ async def get_agent_children(self, uuid: str, **kwargs: Any) -> JSON: size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. 
Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -223353,8 +231394,27 @@ async def get_agent_children(self, uuid: str, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. + Benchmark scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. + Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # + Optional. Capabilities supported by this endpoint + (e.g. input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). 
+ } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # @@ -223368,10 +231428,25 @@ async def get_agent_children(self, uuid: str, **kwargs: Any) -> JSON: "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -223379,6 +231454,42 @@ async def get_agent_children(self, uuid: str, **kwargs: Any) -> JSON: "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": + "str", # Optional. String default value (for + type="dropdown", e.g. "medium"). + "default_value": 0.0, + # Optional. Numeric default value (for + type="number"). + "max": 0.0, # + Optional. Maximum allowed value (for type="number"). + "min": 0.0, # + Optional. Minimum allowed value (for type="number"). + "name": "str", # + Optional. Setting key name (e.g. "max_tokens", + "temperature", "resolution"). + "options": [ + "str" # + Optional. Allowed values for dropdown selections + (for type="dropdown"). + ], + "step": 0.0, # + Optional. Step increment for numeric settings (for + type="number"). + "type": "str" # + Optional. Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. 
+ Whether this model supports extended thinking (Anthropic + models). + "type": "str", # Optional. Model + type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. @@ -223409,6 +231520,45 @@ async def get_agent_children(self, uuid: str, **kwargs: Any) -> JSON: "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level + fallback models. + ], + "policies": [ + { + "custom_task": { + "description": "str", + # Optional. Short task description. + "name": "str" # + Optional. Task name. + }, + "models": [ + "str" # Optional. + Models assigned to the task. + ], + "selection_policy": { + "prefer": "str" # + Optional. One of: none, cheapest, fastest. + }, + "task_slug": "str" # + Optional. Task slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. + Creation date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the + router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. + Last modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -223429,8 +231579,27 @@ async def get_agent_children(self, uuid: str, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. + Benchmark scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. + Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # + Optional. 
Capabilities supported by this endpoint + (e.g. input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # @@ -223444,10 +231613,25 @@ async def get_agent_children(self, uuid: str, **kwargs: Any) -> JSON: "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -223455,6 +231639,42 @@ async def get_agent_children(self, uuid: str, **kwargs: Any) -> JSON: "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": + "str", # Optional. String default value (for + type="dropdown", e.g. "medium"). + "default_value": 0.0, + # Optional. Numeric default value (for + type="number"). + "max": 0.0, # + Optional. Maximum allowed value (for type="number"). + "min": 0.0, # + Optional. Minimum allowed value (for type="number"). + "name": "str", # + Optional. Setting key name (e.g. "max_tokens", + "temperature", "resolution"). + "options": [ + "str" # + Optional. Allowed values for dropdown selections + (for type="dropdown"). + ], + "step": 0.0, # + Optional. Step increment for numeric settings (for + type="number"). 
+ "type": "str" # + Optional. Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. + Whether this model supports extended thinking (Anthropic + models). + "type": "str", # Optional. Model + type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. @@ -223488,6 +231708,8 @@ async def get_agent_children(self, uuid: str, **kwargs: Any) -> JSON: "project_id": "str", # Optional. Child agents. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort + for the agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: @@ -223635,6 +231857,12 @@ async def get_agent_children(self, uuid: str, **kwargs: Any) -> JSON: List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. + Whether reranking is enabled for retrieval. + "model": "str" # Optional. + Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -223662,8 +231890,27 @@ async def get_agent_children(self, uuid: str, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. 
The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -223677,16 +231924,64 @@ async def get_agent_children(self, uuid: str, **kwargs: Any) -> JSON: "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. 
Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -223730,6 +232025,8 @@ async def get_agent_children(self, uuid: str, **kwargs: Any) -> JSON: agent template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token + budget for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. Child agents. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. @@ -223772,6 +232069,13 @@ async def get_agent_children(self, uuid: str, **kwargs: Any) -> JSON: 00:00:00", # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default + value is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values + are: "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # @@ -223802,6 +232106,16 @@ async def get_agent_children(self, uuid: str, **kwargs: Any) -> JSON: "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent + evaluation or model evaluation. For backwards + compatibility, UNSPECIFIED defaults to agent metrics + only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", + "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. 
@@ -223815,8 +232129,10 @@ async def get_agent_children(self, uuid: str, **kwargs: Any) -> JSON: "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", - "METRIC_TYPE_GENERAL_QUALITY", and - "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_GENERAL_QUALITY", + "METRIC_TYPE_RAG_AND_TOOL", + "METRIC_TYPE_MODEL_QUALITY", and + "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -224239,6 +232555,12 @@ async def update_agent_deployment_visibility( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -224265,6 +232587,24 @@ async def update_agent_deployment_visibility( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -224273,8 +232613,27 @@ async def update_agent_deployment_visibility( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. 
High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -224286,14 +232645,56 @@ async def update_agent_deployment_visibility( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. 
Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -224328,8 +232729,27 @@ async def update_agent_deployment_visibility( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -224343,16 +232763,64 @@ async def update_agent_deployment_visibility( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. 
+ ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -224381,6 +232849,44 @@ async def update_agent_deployment_visibility( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. 
+ ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -224400,8 +232906,27 @@ async def update_agent_deployment_visibility( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -224415,16 +232940,64 @@ async def update_agent_deployment_visibility( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. 
Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -224456,6 +233029,8 @@ async def update_agent_deployment_visibility( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". 
* RETRIEVAL_METHOD_UNKNOWN: The @@ -224600,6 +233175,12 @@ async def update_agent_deployment_visibility( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -224627,8 +233208,27 @@ async def update_agent_deployment_visibility( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -224641,14 +233241,59 @@ async def update_agent_deployment_visibility( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. 
Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -224688,6 +233333,8 @@ async def update_agent_deployment_visibility( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -224727,6 +233374,13 @@ async def update_agent_deployment_visibility( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. 
+ "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -224757,6 +233411,15 @@ async def update_agent_deployment_visibility( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -224770,7 +233433,8 @@ async def update_agent_deployment_visibility( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -225097,6 +233761,12 @@ async def update_agent_deployment_visibility( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. 
@@ -225123,6 +233793,24 @@ async def update_agent_deployment_visibility( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -225131,8 +233819,27 @@ async def update_agent_deployment_visibility( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -225144,14 +233851,56 @@ async def update_agent_deployment_visibility( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. 
+ "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -225186,8 +233935,27 @@ async def update_agent_deployment_visibility( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. 
tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -225201,16 +233969,64 @@ async def update_agent_deployment_visibility( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). 
+ "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -225239,6 +234055,44 @@ async def update_agent_deployment_visibility( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -225258,8 +234112,27 @@ async def update_agent_deployment_visibility( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. 
tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -225273,16 +234146,64 @@ async def update_agent_deployment_visibility( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). 
+ "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -225314,6 +234235,8 @@ async def update_agent_deployment_visibility( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -225458,6 +234381,12 @@ async def update_agent_deployment_visibility( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -225485,8 +234414,27 @@ async def update_agent_deployment_visibility( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. 
+ "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -225499,14 +234447,59 @@ async def update_agent_deployment_visibility( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. 
Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -225546,6 +234539,8 @@ async def update_agent_deployment_visibility( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -225585,6 +234580,13 @@ async def update_agent_deployment_visibility( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -225615,6 +234617,15 @@ async def update_agent_deployment_visibility( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. 
If true, the metric is inverted, meaning that a lower value is better. @@ -225628,7 +234639,8 @@ async def update_agent_deployment_visibility( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -225961,6 +234973,12 @@ async def update_agent_deployment_visibility( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -225987,6 +235005,24 @@ async def update_agent_deployment_visibility( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -225995,8 +235031,27 @@ async def update_agent_deployment_visibility( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. 
High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -226008,14 +235063,56 @@ async def update_agent_deployment_visibility( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. 
Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -226050,8 +235147,27 @@ async def update_agent_deployment_visibility( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -226065,16 +235181,64 @@ async def update_agent_deployment_visibility( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. 
+ ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -226103,6 +235267,44 @@ async def update_agent_deployment_visibility( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. 
+ ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -226122,8 +235324,27 @@ async def update_agent_deployment_visibility( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -226137,16 +235358,64 @@ async def update_agent_deployment_visibility( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. 
Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -226178,6 +235447,8 @@ async def update_agent_deployment_visibility( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". 
* RETRIEVAL_METHOD_UNKNOWN: The @@ -226322,6 +235593,12 @@ async def update_agent_deployment_visibility( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -226349,8 +235626,27 @@ async def update_agent_deployment_visibility( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -226363,14 +235659,59 @@ async def update_agent_deployment_visibility( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. 
Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -226410,6 +235751,8 @@ async def update_agent_deployment_visibility( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -226449,6 +235792,13 @@ async def update_agent_deployment_visibility( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. 
+ "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -226479,6 +235829,15 @@ async def update_agent_deployment_visibility( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -226492,7 +235851,8 @@ async def update_agent_deployment_visibility( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -228485,6 +237845,12 @@ async def list_agents_by_anthropic_key( "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -228512,6 +237878,24 @@ async def list_agents_by_anthropic_key( stream. }, "max_tokens": 0, # Optional. 
+ "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of + allowed tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional + additional headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of + the MCP server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -228523,8 +237907,27 @@ async def list_agents_by_anthropic_key( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -228537,14 +237940,59 @@ async def list_agents_by_anthropic_key( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. 
Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -228582,8 +238030,27 @@ async def list_agents_by_anthropic_key( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. + Benchmark scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. + Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # + Optional. 
Capabilities supported by this endpoint + (e.g. input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # @@ -228597,10 +238064,25 @@ async def list_agents_by_anthropic_key( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -228608,6 +238090,42 @@ async def list_agents_by_anthropic_key( "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": + "str", # Optional. String default value (for + type="dropdown", e.g. "medium"). + "default_value": 0.0, + # Optional. Numeric default value (for + type="number"). + "max": 0.0, # + Optional. Maximum allowed value (for type="number"). + "min": 0.0, # + Optional. Minimum allowed value (for type="number"). + "name": "str", # + Optional. Setting key name (e.g. "max_tokens", + "temperature", "resolution"). + "options": [ + "str" # + Optional. Allowed values for dropdown selections + (for type="dropdown"). + ], + "step": 0.0, # + Optional. Step increment for numeric settings (for + type="number"). + "type": "str" # + Optional. 
Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. + Whether this model supports extended thinking (Anthropic + models). + "type": "str", # Optional. Model + type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. @@ -228638,6 +238156,45 @@ async def list_agents_by_anthropic_key( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level + fallback models. + ], + "policies": [ + { + "custom_task": { + "description": "str", + # Optional. Short task description. + "name": "str" # + Optional. Task name. + }, + "models": [ + "str" # Optional. + Models assigned to the task. + ], + "selection_policy": { + "prefer": "str" # + Optional. One of: none, cheapest, fastest. + }, + "task_slug": "str" # + Optional. Task slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. + Creation date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the + router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. + Last modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -228658,8 +238215,27 @@ async def list_agents_by_anthropic_key( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. + Benchmark scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. + Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # + Optional. 
Capabilities supported by this endpoint + (e.g. input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # @@ -228673,10 +238249,25 @@ async def list_agents_by_anthropic_key( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -228684,6 +238275,42 @@ async def list_agents_by_anthropic_key( "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": + "str", # Optional. String default value (for + type="dropdown", e.g. "medium"). + "default_value": 0.0, + # Optional. Numeric default value (for + type="number"). + "max": 0.0, # + Optional. Maximum allowed value (for type="number"). + "min": 0.0, # + Optional. Minimum allowed value (for type="number"). + "name": "str", # + Optional. Setting key name (e.g. "max_tokens", + "temperature", "resolution"). + "options": [ + "str" # + Optional. Allowed values for dropdown selections + (for type="dropdown"). + ], + "step": 0.0, # + Optional. Step increment for numeric settings (for + type="number"). + "type": "str" # + Optional. 
Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. + Whether this model supports extended thinking (Anthropic + models). + "type": "str", # Optional. Model + type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. @@ -228717,6 +238344,8 @@ async def list_agents_by_anthropic_key( "project_id": "str", # Optional. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort + for the agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: @@ -228864,6 +238493,12 @@ async def list_agents_by_anthropic_key( List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. + Whether reranking is enabled for retrieval. + "model": "str" # Optional. + Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -228891,8 +238526,27 @@ async def list_agents_by_anthropic_key( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. 
"inference_version": "str", # Optional. @@ -228906,16 +238560,64 @@ async def list_agents_by_anthropic_key( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). 
"updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -228959,6 +238661,8 @@ async def list_agents_by_anthropic_key( agent template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token + budget for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. @@ -229001,6 +238705,13 @@ async def list_agents_by_anthropic_key( 00:00:00", # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default + value is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values + are: "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # @@ -229031,6 +238742,16 @@ async def list_agents_by_anthropic_key( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent + evaluation or model evaluation. For backwards + compatibility, UNSPECIFIED defaults to agent metrics + only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", + "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -229044,8 +238765,10 @@ async def list_agents_by_anthropic_key( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", - "METRIC_TYPE_GENERAL_QUALITY", and - "METRIC_TYPE_RAG_AND_TOOL". 
+ "METRIC_TYPE_GENERAL_QUALITY", + "METRIC_TYPE_RAG_AND_TOOL", + "METRIC_TYPE_MODEL_QUALITY", and + "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -229203,81 +238926,28 @@ async def list_agents_by_anthropic_key( return cast(JSON, deserialized) # type: ignore - @overload - async def create_evaluation_dataset( - self, - body: Optional[JSON] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> JSON: - # pylint: disable=line-too-long - """Create Evaluation Dataset. - - To create an evaluation dataset, send a POST request to ``/v2/gen-ai/evaluation_datasets``. - - :param body: Default value is None. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "dataset_type": "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value - is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: - "EVALUATION_DATASET_TYPE_UNKNOWN", "EVALUATION_DATASET_TYPE_ADK", and - "EVALUATION_DATASET_TYPE_NON_ADK". - "file_upload_dataset": { - "original_file_name": "str", # Optional. The original file name. - "size_in_bytes": "str", # Optional. The size of the file in bytes. - "stored_object_key": "str" # Optional. The object key the file was - stored as. - }, - "name": "str" # Optional. The name of the agent evaluation dataset. - } - - # response body for status code(s): 200 - response == { - "evaluation_dataset_uuid": "str" # Optional. Evaluation dataset uuid. - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. 
For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @overload - async def create_evaluation_dataset( + @distributed_trace_async + async def list_custom_models( self, - body: Optional[IO[bytes]] = None, *, - content_type: str = "application/json", + page: Optional[int] = None, + per_page: Optional[int] = None, + status: str = "STATUS_UNSPECIFIED", **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Create Evaluation Dataset. + """List Custom Models. - To create an evaluation dataset, send a POST request to ``/v2/gen-ai/evaluation_datasets``. + To list custom models, send a GET request to ``/v2/gen-ai/custom_models``. - :param body: Default value is None. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str + :keyword page: Page number for pagination. Default value is None. + :paramtype page: int + :keyword per_page: Number of items per page. Default value is None. + :paramtype per_page: int + :keyword status: Filter by model status. Known values are: "STATUS_UNSPECIFIED", + "STATUS_IMPORTING", "STATUS_READY", "STATUS_FAILED", and "STATUS_DELETED". Default value is + "STATUS_UNSPECIFIED". + :paramtype status: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -229287,57 +238957,118 @@ async def create_evaluation_dataset( # response body for status code(s): 200 response == { - "evaluation_dataset_uuid": "str" # Optional. Evaluation dataset uuid. 
- } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @distributed_trace_async - async def create_evaluation_dataset( - self, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any - ) -> JSON: - # pylint: disable=line-too-long - """Create Evaluation Dataset. - - To create an evaluation dataset, send a POST request to ``/v2/gen-ai/evaluation_datasets``. - - :param body: Is either a JSON type or a IO[bytes] type. Default value is None. - :type body: JSON or IO[bytes] - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "dataset_type": "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value - is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: - "EVALUATION_DATASET_TYPE_UNKNOWN", "EVALUATION_DATASET_TYPE_ADK", and - "EVALUATION_DATASET_TYPE_NON_ADK". - "file_upload_dataset": { - "original_file_name": "str", # Optional. The original file name. - "size_in_bytes": "str", # Optional. The size of the file in bytes. - "stored_object_key": "str" # Optional. The object key the file was - stored as. + "links": { + "pages": { + "first": "str", # Optional. First page. + "last": "str", # Optional. Last page. + "next": "str", # Optional. Next page. + "previous": "str" # Optional. Previous page. + } }, - "name": "str" # Optional. The name of the agent evaluation dataset. 
- } - - # response body for status code(s): 200 - response == { - "evaluation_dataset_uuid": "str" # Optional. Evaluation dataset uuid. + "max_threshold": 0, # Optional. Maximum number of custom models allowed for + this team's tier. + "meta": { + "page": 0, # Optional. The current page. + "pages": 0, # Optional. Total number of pages. + "total": 0 # Optional. Total amount of items over all pages. + }, + "models": [ + { + "active_deployments": [ + { + "created_at": "str", # Optional. RFC 3339 + timestamp indicating when the dedicated inference deployment was + created. + "endpoints": { + "private_endpoint_fqdn": "str", # + Optional. Private FQDN for the deployment. + "public_endpoint_fqdn": "str" # + Optional. Public FQDN for the deployment. + }, + "id": "str", # Optional. Unique identifier + (UUID) of the dedicated inference deployment. + "name": "str", # Optional. Human-readable + name of the dedicated inference deployment. + "region_slug": "str", # Optional. Slug of + the region where the dedicated inference deployment is running + (e.g. "atl1"). + "state": "str", # Optional. Current + lifecycle state of the dedicated inference deployment (e.g. + "ACTIVE", "PROVISIONING"). + "updated_at": "str" # Optional. RFC 3339 + timestamp indicating when the dedicated inference deployment was + last updated. + } + ], + "architecture": "str", # Optional. Model architecture type + (free-form string from config.json). + "config_json": {}, # Optional. Raw config.json contents from + the model repository. + "context_length": 0, # Optional. Maximum context length + supported by the model. + "cost_estimate_per_month": 0, # Optional. Estimated monthly + cost in dollars for hosting. + "created_at": "2020-02-20 00:00:00", # Optional. Timestamp + when the model was created. + "description": "str", # Optional. Description of the custom + model. + "file_count": 0, # Optional. Number of files in the model. + "input_modalities": [ + "str" # Optional. 
Input modalities supported (e.g., + text, image). + ], + "license": "str", # Optional. License under which the model + is distributed. + "name": "str", # Optional. Name of the custom model. + "output_modalities": [ + "str" # Optional. Output modalities supported (e.g., + text, image). + ], + "parameters": "str", # Optional. Number of parameters in the + model. + "source_ref": { + "access_type": "ACCESS_TYPE_UNSPECIFIED", # + Optional. Default value is "ACCESS_TYPE_UNSPECIFIED". Access level + required for the model repository. Known values are: + "ACCESS_TYPE_UNSPECIFIED", "ACCESS_TYPE_PUBLIC", + "ACCESS_TYPE_PRIVATE", and "ACCESS_TYPE_GATED". + "bucket": "str", # Optional. Spaces bucket name. + "commit_sha": "str", # Optional. Git commit SHA of + the model version. + "hf_token": "str", # Optional. User-provided + HuggingFace token for gated/private models (not persisted in + source_ref). + "prefix": "str", # Optional. Object prefix path in + the bucket. + "region": "str", # Optional. Spaces bucket region. + "repo_id": "str" # Optional. Huggingface repository + identifier. + }, + "source_type": "SOURCE_TYPE_UNSPECIFIED", # Optional. + Default value is "SOURCE_TYPE_UNSPECIFIED". Source from which the model + was imported. Known values are: "SOURCE_TYPE_UNSPECIFIED", + "SOURCE_TYPE_HUGGINGFACE", "SOURCE_TYPE_SPACES_BUCKET", + "SOURCE_TYPE_SDK_UPLOAD", and "SOURCE_TYPE_FINE_TUNING". + "status": "STATUS_UNSPECIFIED", # Optional. Default value is + "STATUS_UNSPECIFIED". Import and deployment status of the custom model. + Known values are: "STATUS_UNSPECIFIED", "STATUS_IMPORTING", + "STATUS_READY", "STATUS_FAILED", and "STATUS_DELETED". + "storage_region": "str", # Optional. Region of the Spaces + bucket where model files are stored. + "tags": { + "tags": [ + "str" # Optional. List of tag strings. + ] + }, + "team_id": "str", # Optional. Team that owns the model. + "total_size_bytes": "str", # Optional. Total size of model + files in bytes. 
+ "updated_at": "2020-02-20 00:00:00", # Optional. Timestamp + when the model was last updated. + "uuid": "str" # Optional. Unique identifier for the custom + model. + } + ] } # response body for status code(s): 404 response == { @@ -229364,29 +239095,15 @@ async def create_evaluation_dataset( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) cls: ClsType[JSON] = kwargs.pop("cls", None) - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - if body is not None: - _json = body - else: - _json = None - - _request = build_genai_create_evaluation_dataset_request( - content_type=content_type, - json=_json, - content=_content, + _request = build_genai_list_custom_models_request( + page=page, + per_page=per_page, + status=status, headers=_headers, params=_params, ) @@ -229446,7 +239163,7 @@ async def create_evaluation_dataset( return cast(JSON, deserialized) # type: ignore @overload - async def create_evaluation_dataset_file_upload_presigned_urls( # pylint: disable=name-too-long + async def import_custom_model( self, body: Optional[JSON] = None, *, @@ -229454,10 +239171,9 @@ async def create_evaluation_dataset_file_upload_presigned_urls( # pylint: disab **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Create Presigned URLs for Evaluation Dataset File Upload. + """Import Custom Model. - To create presigned URLs for evaluation dataset file upload, send a POST request to - ``/v2/gen-ai/evaluation_datasets/file_upload_presigned_urls``. + To import a custom model, send a POST request to ``/v2/gen-ai/custom_models/import``. :param body: Default value is None. 
:type body: JSON @@ -229473,29 +239189,151 @@ async def create_evaluation_dataset_file_upload_presigned_urls( # pylint: disab # JSON input template you can fill out and use as your body input. body = { - "files": [ - { - "file_name": "str", # Optional. Local filename. - "file_size": "str" # Optional. The size of the file in - bytes. - } - ] + "accept_terms_and_conditions": bool, # Optional. Whether the caller accepts + the terms and conditions for importing this model. + "description": "str", # Optional. Description of the model. + "name": "str", # Optional. Name for the imported model. + "preferred_gpu_region": "str", # Optional. Preferred GPU region for + deployment. + "source_ref": { + "access_type": "ACCESS_TYPE_UNSPECIFIED", # Optional. Default value + is "ACCESS_TYPE_UNSPECIFIED". Access level required for the model repository. + Known values are: "ACCESS_TYPE_UNSPECIFIED", "ACCESS_TYPE_PUBLIC", + "ACCESS_TYPE_PRIVATE", and "ACCESS_TYPE_GATED". + "bucket": "str", # Optional. Spaces bucket name. + "commit_sha": "str", # Optional. Git commit SHA of the model + version. + "hf_token": "str", # Optional. User-provided HuggingFace token for + gated/private models (not persisted in source_ref). + "prefix": "str", # Optional. Object prefix path in the bucket. + "region": "str", # Optional. Spaces bucket region. + "repo_id": "str" # Optional. Huggingface repository identifier. + }, + "source_type": "SOURCE_TYPE_UNSPECIFIED", # Optional. Default value is + "SOURCE_TYPE_UNSPECIFIED". Source from which the model was imported. Known values + are: "SOURCE_TYPE_UNSPECIFIED", "SOURCE_TYPE_HUGGINGFACE", + "SOURCE_TYPE_SPACES_BUCKET", "SOURCE_TYPE_SDK_UPLOAD", and + "SOURCE_TYPE_FINE_TUNING". + "tags": { + "tags": [ + "str" # Optional. List of tag strings. + ] + } } # response body for status code(s): 200 response == { - "request_id": "str", # Optional. The ID generated for the request for - Presigned URLs. - "uploads": [ + "error": "str", # Optional. 
+ "import_job": { + "bytes_done": "str", # Optional. Bytes imported so far. + "bytes_total": "str", # Optional. Total bytes to import. + "completed_at": "2020-02-20 00:00:00", # Optional. Timestamp when + the import completed. + "created_at": "2020-02-20 00:00:00", # Optional. Timestamp when the + job was created. + "error_message": "str", # Optional. Error message if import failed. + "error_step": "str", # Optional. Step at which the error occurred. + "files_done": 0, # Optional. Number of files imported so far. + "files_total": 0, # Optional. Total number of files to import. + "started_at": "2020-02-20 00:00:00", # Optional. Timestamp when the + import started. + "status": "str", # Optional. Current status of the import job. + "uuid": "str" # Optional. Unique identifier for the import job. + }, + "model": { + "active_deployments": [ + { + "created_at": "str", # Optional. RFC 3339 timestamp + indicating when the dedicated inference deployment was created. + "endpoints": { + "private_endpoint_fqdn": "str", # Optional. + Private FQDN for the deployment. + "public_endpoint_fqdn": "str" # Optional. + Public FQDN for the deployment. + }, + "id": "str", # Optional. Unique identifier (UUID) of + the dedicated inference deployment. + "name": "str", # Optional. Human-readable name of + the dedicated inference deployment. + "region_slug": "str", # Optional. Slug of the region + where the dedicated inference deployment is running (e.g. "atl1"). + "state": "str", # Optional. Current lifecycle state + of the dedicated inference deployment (e.g. "ACTIVE", + "PROVISIONING"). + "updated_at": "str" # Optional. RFC 3339 timestamp + indicating when the dedicated inference deployment was last updated. + } + ], + "architecture": "str", # Optional. Model architecture type + (free-form string from config.json). + "config_json": {}, # Optional. Raw config.json contents from the + model repository. + "context_length": 0, # Optional. Maximum context length supported by + the model. 
+ "cost_estimate_per_month": 0, # Optional. Estimated monthly cost in + dollars for hosting. + "created_at": "2020-02-20 00:00:00", # Optional. Timestamp when the + model was created. + "description": "str", # Optional. Description of the custom model. + "file_count": 0, # Optional. Number of files in the model. + "input_modalities": [ + "str" # Optional. Input modalities supported (e.g., text, + image). + ], + "license": "str", # Optional. License under which the model is + distributed. + "name": "str", # Optional. Name of the custom model. + "output_modalities": [ + "str" # Optional. Output modalities supported (e.g., text, + image). + ], + "parameters": "str", # Optional. Number of parameters in the model. + "source_ref": { + "access_type": "ACCESS_TYPE_UNSPECIFIED", # Optional. + Default value is "ACCESS_TYPE_UNSPECIFIED". Access level required for the + model repository. Known values are: "ACCESS_TYPE_UNSPECIFIED", + "ACCESS_TYPE_PUBLIC", "ACCESS_TYPE_PRIVATE", and "ACCESS_TYPE_GATED". + "bucket": "str", # Optional. Spaces bucket name. + "commit_sha": "str", # Optional. Git commit SHA of the model + version. + "hf_token": "str", # Optional. User-provided HuggingFace + token for gated/private models (not persisted in source_ref). + "prefix": "str", # Optional. Object prefix path in the + bucket. + "region": "str", # Optional. Spaces bucket region. + "repo_id": "str" # Optional. Huggingface repository + identifier. + }, + "source_type": "SOURCE_TYPE_UNSPECIFIED", # Optional. Default value + is "SOURCE_TYPE_UNSPECIFIED". Source from which the model was imported. Known + values are: "SOURCE_TYPE_UNSPECIFIED", "SOURCE_TYPE_HUGGINGFACE", + "SOURCE_TYPE_SPACES_BUCKET", "SOURCE_TYPE_SDK_UPLOAD", and + "SOURCE_TYPE_FINE_TUNING". + "status": "STATUS_UNSPECIFIED", # Optional. Default value is + "STATUS_UNSPECIFIED". Import and deployment status of the custom model. 
Known + values are: "STATUS_UNSPECIFIED", "STATUS_IMPORTING", "STATUS_READY", + "STATUS_FAILED", and "STATUS_DELETED". + "storage_region": "str", # Optional. Region of the Spaces bucket + where model files are stored. + "tags": { + "tags": [ + "str" # Optional. List of tag strings. + ] + }, + "team_id": "str", # Optional. Team that owns the model. + "total_size_bytes": "str", # Optional. Total size of model files in + bytes. + "updated_at": "2020-02-20 00:00:00", # Optional. Timestamp when the + model was last updated. + "uuid": "str" # Optional. Unique identifier for the custom model. + }, + "validation_steps": [ { - "expires_at": "2020-02-20 00:00:00", # Optional. The time - the url expires at. - "object_key": "str", # Optional. The unique object key to - store the file as. - "original_file_name": "str", # Optional. The original file - name. - "presigned_url": "str" # Optional. The actual presigned URL - the client can use to upload the file directly. + "error": "str", # Optional. Error message if validation + failed. + "name": "str", # Optional. Name of the validation step. + "passed": bool # Optional. Whether the validation step + passed. } ] } @@ -229513,7 +239351,7 @@ async def create_evaluation_dataset_file_upload_presigned_urls( # pylint: disab """ @overload - async def create_evaluation_dataset_file_upload_presigned_urls( # pylint: disable=name-too-long + async def import_custom_model( self, body: Optional[IO[bytes]] = None, *, @@ -229521,10 +239359,9 @@ async def create_evaluation_dataset_file_upload_presigned_urls( # pylint: disab **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Create Presigned URLs for Evaluation Dataset File Upload. + """Import Custom Model. - To create presigned URLs for evaluation dataset file upload, send a POST request to - ``/v2/gen-ai/evaluation_datasets/file_upload_presigned_urls``. + To import a custom model, send a POST request to ``/v2/gen-ai/custom_models/import``. :param body: Default value is None. 
:type body: IO[bytes] @@ -229540,18 +239377,117 @@ async def create_evaluation_dataset_file_upload_presigned_urls( # pylint: disab # response body for status code(s): 200 response == { - "request_id": "str", # Optional. The ID generated for the request for - Presigned URLs. - "uploads": [ + "error": "str", # Optional. + "import_job": { + "bytes_done": "str", # Optional. Bytes imported so far. + "bytes_total": "str", # Optional. Total bytes to import. + "completed_at": "2020-02-20 00:00:00", # Optional. Timestamp when + the import completed. + "created_at": "2020-02-20 00:00:00", # Optional. Timestamp when the + job was created. + "error_message": "str", # Optional. Error message if import failed. + "error_step": "str", # Optional. Step at which the error occurred. + "files_done": 0, # Optional. Number of files imported so far. + "files_total": 0, # Optional. Total number of files to import. + "started_at": "2020-02-20 00:00:00", # Optional. Timestamp when the + import started. + "status": "str", # Optional. Current status of the import job. + "uuid": "str" # Optional. Unique identifier for the import job. + }, + "model": { + "active_deployments": [ + { + "created_at": "str", # Optional. RFC 3339 timestamp + indicating when the dedicated inference deployment was created. + "endpoints": { + "private_endpoint_fqdn": "str", # Optional. + Private FQDN for the deployment. + "public_endpoint_fqdn": "str" # Optional. + Public FQDN for the deployment. + }, + "id": "str", # Optional. Unique identifier (UUID) of + the dedicated inference deployment. + "name": "str", # Optional. Human-readable name of + the dedicated inference deployment. + "region_slug": "str", # Optional. Slug of the region + where the dedicated inference deployment is running (e.g. "atl1"). + "state": "str", # Optional. Current lifecycle state + of the dedicated inference deployment (e.g. "ACTIVE", + "PROVISIONING"). + "updated_at": "str" # Optional. 
RFC 3339 timestamp + indicating when the dedicated inference deployment was last updated. + } + ], + "architecture": "str", # Optional. Model architecture type + (free-form string from config.json). + "config_json": {}, # Optional. Raw config.json contents from the + model repository. + "context_length": 0, # Optional. Maximum context length supported by + the model. + "cost_estimate_per_month": 0, # Optional. Estimated monthly cost in + dollars for hosting. + "created_at": "2020-02-20 00:00:00", # Optional. Timestamp when the + model was created. + "description": "str", # Optional. Description of the custom model. + "file_count": 0, # Optional. Number of files in the model. + "input_modalities": [ + "str" # Optional. Input modalities supported (e.g., text, + image). + ], + "license": "str", # Optional. License under which the model is + distributed. + "name": "str", # Optional. Name of the custom model. + "output_modalities": [ + "str" # Optional. Output modalities supported (e.g., text, + image). + ], + "parameters": "str", # Optional. Number of parameters in the model. + "source_ref": { + "access_type": "ACCESS_TYPE_UNSPECIFIED", # Optional. + Default value is "ACCESS_TYPE_UNSPECIFIED". Access level required for the + model repository. Known values are: "ACCESS_TYPE_UNSPECIFIED", + "ACCESS_TYPE_PUBLIC", "ACCESS_TYPE_PRIVATE", and "ACCESS_TYPE_GATED". + "bucket": "str", # Optional. Spaces bucket name. + "commit_sha": "str", # Optional. Git commit SHA of the model + version. + "hf_token": "str", # Optional. User-provided HuggingFace + token for gated/private models (not persisted in source_ref). + "prefix": "str", # Optional. Object prefix path in the + bucket. + "region": "str", # Optional. Spaces bucket region. + "repo_id": "str" # Optional. Huggingface repository + identifier. + }, + "source_type": "SOURCE_TYPE_UNSPECIFIED", # Optional. Default value + is "SOURCE_TYPE_UNSPECIFIED". Source from which the model was imported. 
Known + values are: "SOURCE_TYPE_UNSPECIFIED", "SOURCE_TYPE_HUGGINGFACE", + "SOURCE_TYPE_SPACES_BUCKET", "SOURCE_TYPE_SDK_UPLOAD", and + "SOURCE_TYPE_FINE_TUNING". + "status": "STATUS_UNSPECIFIED", # Optional. Default value is + "STATUS_UNSPECIFIED". Import and deployment status of the custom model. Known + values are: "STATUS_UNSPECIFIED", "STATUS_IMPORTING", "STATUS_READY", + "STATUS_FAILED", and "STATUS_DELETED". + "storage_region": "str", # Optional. Region of the Spaces bucket + where model files are stored. + "tags": { + "tags": [ + "str" # Optional. List of tag strings. + ] + }, + "team_id": "str", # Optional. Team that owns the model. + "total_size_bytes": "str", # Optional. Total size of model files in + bytes. + "updated_at": "2020-02-20 00:00:00", # Optional. Timestamp when the + model was last updated. + "uuid": "str" # Optional. Unique identifier for the custom model. + }, + "validation_steps": [ { - "expires_at": "2020-02-20 00:00:00", # Optional. The time - the url expires at. - "object_key": "str", # Optional. The unique object key to - store the file as. - "original_file_name": "str", # Optional. The original file - name. - "presigned_url": "str" # Optional. The actual presigned URL - the client can use to upload the file directly. + "error": "str", # Optional. Error message if validation + failed. + "name": "str", # Optional. Name of the validation step. + "passed": bool # Optional. Whether the validation step + passed. } ] } @@ -229569,14 +239505,13 @@ async def create_evaluation_dataset_file_upload_presigned_urls( # pylint: disab """ @distributed_trace_async - async def create_evaluation_dataset_file_upload_presigned_urls( # pylint: disable=name-too-long + async def import_custom_model( self, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Create Presigned URLs for Evaluation Dataset File Upload. + """Import Custom Model. 
- To create presigned URLs for evaluation dataset file upload, send a POST request to - ``/v2/gen-ai/evaluation_datasets/file_upload_presigned_urls``. + To import a custom model, send a POST request to ``/v2/gen-ai/custom_models/import``. :param body: Is either a JSON type or a IO[bytes] type. Default value is None. :type body: JSON or IO[bytes] @@ -229589,29 +239524,151 @@ async def create_evaluation_dataset_file_upload_presigned_urls( # pylint: disab # JSON input template you can fill out and use as your body input. body = { - "files": [ - { - "file_name": "str", # Optional. Local filename. - "file_size": "str" # Optional. The size of the file in - bytes. - } - ] + "accept_terms_and_conditions": bool, # Optional. Whether the caller accepts + the terms and conditions for importing this model. + "description": "str", # Optional. Description of the model. + "name": "str", # Optional. Name for the imported model. + "preferred_gpu_region": "str", # Optional. Preferred GPU region for + deployment. + "source_ref": { + "access_type": "ACCESS_TYPE_UNSPECIFIED", # Optional. Default value + is "ACCESS_TYPE_UNSPECIFIED". Access level required for the model repository. + Known values are: "ACCESS_TYPE_UNSPECIFIED", "ACCESS_TYPE_PUBLIC", + "ACCESS_TYPE_PRIVATE", and "ACCESS_TYPE_GATED". + "bucket": "str", # Optional. Spaces bucket name. + "commit_sha": "str", # Optional. Git commit SHA of the model + version. + "hf_token": "str", # Optional. User-provided HuggingFace token for + gated/private models (not persisted in source_ref). + "prefix": "str", # Optional. Object prefix path in the bucket. + "region": "str", # Optional. Spaces bucket region. + "repo_id": "str" # Optional. Huggingface repository identifier. + }, + "source_type": "SOURCE_TYPE_UNSPECIFIED", # Optional. Default value is + "SOURCE_TYPE_UNSPECIFIED". Source from which the model was imported. 
Known values + are: "SOURCE_TYPE_UNSPECIFIED", "SOURCE_TYPE_HUGGINGFACE", + "SOURCE_TYPE_SPACES_BUCKET", "SOURCE_TYPE_SDK_UPLOAD", and + "SOURCE_TYPE_FINE_TUNING". + "tags": { + "tags": [ + "str" # Optional. List of tag strings. + ] + } } # response body for status code(s): 200 response == { - "request_id": "str", # Optional. The ID generated for the request for - Presigned URLs. - "uploads": [ + "error": "str", # Optional. + "import_job": { + "bytes_done": "str", # Optional. Bytes imported so far. + "bytes_total": "str", # Optional. Total bytes to import. + "completed_at": "2020-02-20 00:00:00", # Optional. Timestamp when + the import completed. + "created_at": "2020-02-20 00:00:00", # Optional. Timestamp when the + job was created. + "error_message": "str", # Optional. Error message if import failed. + "error_step": "str", # Optional. Step at which the error occurred. + "files_done": 0, # Optional. Number of files imported so far. + "files_total": 0, # Optional. Total number of files to import. + "started_at": "2020-02-20 00:00:00", # Optional. Timestamp when the + import started. + "status": "str", # Optional. Current status of the import job. + "uuid": "str" # Optional. Unique identifier for the import job. + }, + "model": { + "active_deployments": [ + { + "created_at": "str", # Optional. RFC 3339 timestamp + indicating when the dedicated inference deployment was created. + "endpoints": { + "private_endpoint_fqdn": "str", # Optional. + Private FQDN for the deployment. + "public_endpoint_fqdn": "str" # Optional. + Public FQDN for the deployment. + }, + "id": "str", # Optional. Unique identifier (UUID) of + the dedicated inference deployment. + "name": "str", # Optional. Human-readable name of + the dedicated inference deployment. + "region_slug": "str", # Optional. Slug of the region + where the dedicated inference deployment is running (e.g. "atl1"). + "state": "str", # Optional. Current lifecycle state + of the dedicated inference deployment (e.g. 
"ACTIVE", + "PROVISIONING"). + "updated_at": "str" # Optional. RFC 3339 timestamp + indicating when the dedicated inference deployment was last updated. + } + ], + "architecture": "str", # Optional. Model architecture type + (free-form string from config.json). + "config_json": {}, # Optional. Raw config.json contents from the + model repository. + "context_length": 0, # Optional. Maximum context length supported by + the model. + "cost_estimate_per_month": 0, # Optional. Estimated monthly cost in + dollars for hosting. + "created_at": "2020-02-20 00:00:00", # Optional. Timestamp when the + model was created. + "description": "str", # Optional. Description of the custom model. + "file_count": 0, # Optional. Number of files in the model. + "input_modalities": [ + "str" # Optional. Input modalities supported (e.g., text, + image). + ], + "license": "str", # Optional. License under which the model is + distributed. + "name": "str", # Optional. Name of the custom model. + "output_modalities": [ + "str" # Optional. Output modalities supported (e.g., text, + image). + ], + "parameters": "str", # Optional. Number of parameters in the model. + "source_ref": { + "access_type": "ACCESS_TYPE_UNSPECIFIED", # Optional. + Default value is "ACCESS_TYPE_UNSPECIFIED". Access level required for the + model repository. Known values are: "ACCESS_TYPE_UNSPECIFIED", + "ACCESS_TYPE_PUBLIC", "ACCESS_TYPE_PRIVATE", and "ACCESS_TYPE_GATED". + "bucket": "str", # Optional. Spaces bucket name. + "commit_sha": "str", # Optional. Git commit SHA of the model + version. + "hf_token": "str", # Optional. User-provided HuggingFace + token for gated/private models (not persisted in source_ref). + "prefix": "str", # Optional. Object prefix path in the + bucket. + "region": "str", # Optional. Spaces bucket region. + "repo_id": "str" # Optional. Huggingface repository + identifier. + }, + "source_type": "SOURCE_TYPE_UNSPECIFIED", # Optional. Default value + is "SOURCE_TYPE_UNSPECIFIED". 
Source from which the model was imported. Known + values are: "SOURCE_TYPE_UNSPECIFIED", "SOURCE_TYPE_HUGGINGFACE", + "SOURCE_TYPE_SPACES_BUCKET", "SOURCE_TYPE_SDK_UPLOAD", and + "SOURCE_TYPE_FINE_TUNING". + "status": "STATUS_UNSPECIFIED", # Optional. Default value is + "STATUS_UNSPECIFIED". Import and deployment status of the custom model. Known + values are: "STATUS_UNSPECIFIED", "STATUS_IMPORTING", "STATUS_READY", + "STATUS_FAILED", and "STATUS_DELETED". + "storage_region": "str", # Optional. Region of the Spaces bucket + where model files are stored. + "tags": { + "tags": [ + "str" # Optional. List of tag strings. + ] + }, + "team_id": "str", # Optional. Team that owns the model. + "total_size_bytes": "str", # Optional. Total size of model files in + bytes. + "updated_at": "2020-02-20 00:00:00", # Optional. Timestamp when the + model was last updated. + "uuid": "str" # Optional. Unique identifier for the custom model. + }, + "validation_steps": [ { - "expires_at": "2020-02-20 00:00:00", # Optional. The time - the url expires at. - "object_key": "str", # Optional. The unique object key to - store the file as. - "original_file_name": "str", # Optional. The original file - name. - "presigned_url": "str" # Optional. The actual presigned URL - the client can use to upload the file directly. + "error": "str", # Optional. Error message if validation + failed. + "name": "str", # Optional. Name of the validation step. + "passed": bool # Optional. Whether the validation step + passed. 
} ] } @@ -229659,14 +239716,12 @@ async def create_evaluation_dataset_file_upload_presigned_urls( # pylint: disab else: _json = None - _request = ( - build_genai_create_evaluation_dataset_file_upload_presigned_urls_request( - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) + _request = build_genai_import_custom_model_request( + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, ) _request.url = self._client.format_url(_request.url) @@ -229724,12 +239779,14 @@ async def create_evaluation_dataset_file_upload_presigned_urls( # pylint: disab return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def list_evaluation_metrics(self, **kwargs: Any) -> JSON: + async def delete_custom_model(self, uuid: str, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """List Evaluation Metrics. + """Delete Custom Model. - To list all evaluation metrics, send a GET request to ``/v2/gen-ai/evaluation_metrics``. + To delete a custom model, send a DELETE request to ``/v2/genai/custom_models/{uuid}``. + :param uuid: UUID of the custom model to delete. Required. + :type uuid: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -229739,34 +239796,11 @@ async def list_evaluation_metrics(self, **kwargs: Any) -> JSON: # response body for status code(s): 200 response == { - "metrics": [ - { - "category": "METRIC_CATEGORY_UNSPECIFIED", # Optional. - Default value is "METRIC_CATEGORY_UNSPECIFIED". Known values are: - "METRIC_CATEGORY_UNSPECIFIED", "METRIC_CATEGORY_CORRECTNESS", - "METRIC_CATEGORY_USER_OUTCOMES", "METRIC_CATEGORY_SAFETY_AND_SECURITY", - "METRIC_CATEGORY_CONTEXT_QUALITY", and "METRIC_CATEGORY_MODEL_FIT". - "description": "str", # Optional. - "inverted": bool, # Optional. If true, the metric is - inverted, meaning that a lower value is better. - "is_metric_goal": bool, # Optional. 
- "metric_name": "str", # Optional. - "metric_rank": 0, # Optional. - "metric_type": "METRIC_TYPE_UNSPECIFIED", # Optional. - Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: - "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", and - "METRIC_TYPE_RAG_AND_TOOL". - "metric_uuid": "str", # Optional. - "metric_value_type": "METRIC_VALUE_TYPE_UNSPECIFIED", # - Optional. Default value is "METRIC_VALUE_TYPE_UNSPECIFIED". Known values - are: "METRIC_VALUE_TYPE_UNSPECIFIED", "METRIC_VALUE_TYPE_NUMBER", - "METRIC_VALUE_TYPE_STRING", and "METRIC_VALUE_TYPE_PERCENTAGE". - "range_max": 0.0, # Optional. The maximum value for the - metric. - "range_min": 0.0 # Optional. The minimum value for the - metric. - } - ] + "error": "str", # Optional. Error message if deletion failed. + "status": "DELETE_CUSTOM_MODEL_STATUS_UNSPECIFIED" # Optional. Default value + is "DELETE_CUSTOM_MODEL_STATUS_UNSPECIFIED". Status of delete operation. Known + values are: "DELETE_CUSTOM_MODEL_STATUS_UNSPECIFIED", + "DELETE_CUSTOM_MODEL_STATUS_SUCCESS", and "DELETE_CUSTOM_MODEL_STATUS_FAIL". } # response body for status code(s): 404 response == { @@ -229798,7 +239832,8 @@ async def list_evaluation_metrics(self, **kwargs: Any) -> JSON: cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_genai_list_evaluation_metrics_request( + _request = build_genai_delete_custom_model_request( + uuid=uuid, headers=_headers, params=_params, ) @@ -229858,18 +239893,22 @@ async def list_evaluation_metrics(self, **kwargs: Any) -> JSON: return cast(JSON, deserialized) # type: ignore @overload - async def run_evaluation_test_case( + async def update_custom_model_metadata( self, + uuid: str, body: Optional[JSON] = None, *, content_type: str = "application/json", **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Run an Evaluation Test Case. + """Update Custom Model Metadata. - To run an evaluation test case, send a POST request to ``/v2/gen-ai/evaluation_runs``. 
+ To update custom model metadata, send a PATCH request to + ``/v2/gen-ai/custom_models/{uuid}/metadata``. + :param uuid: UUID of the custom model to update. Required. + :type uuid: str :param body: Default value is None. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. @@ -229884,23 +239923,105 @@ async def run_evaluation_test_case( # JSON input template you can fill out and use as your body input. body = { - "agent_deployment_names": [ - "str" # Optional. Agent deployment names to run the test case - against (ADK agent workspaces). - ], - "agent_uuids": [ - "str" # Optional. Agent UUIDs to run the test case against (legacy - agents). - ], - "run_name": "str", # Optional. The name of the run. - "test_case_uuid": "str" # Optional. Test-case UUID to run. + "description": "str", # Optional. + "name": "str", # Optional. + "tags": { + "tags": [ + "str" # Optional. List of tag strings. + ] + }, + "uuid": "str" # Optional. UUID of the custom model to update. } # response body for status code(s): 200 response == { - "evaluation_run_uuids": [ - "str" # Optional. - ] + "model": { + "active_deployments": [ + { + "created_at": "str", # Optional. RFC 3339 timestamp + indicating when the dedicated inference deployment was created. + "endpoints": { + "private_endpoint_fqdn": "str", # Optional. + Private FQDN for the deployment. + "public_endpoint_fqdn": "str" # Optional. + Public FQDN for the deployment. + }, + "id": "str", # Optional. Unique identifier (UUID) of + the dedicated inference deployment. + "name": "str", # Optional. Human-readable name of + the dedicated inference deployment. + "region_slug": "str", # Optional. Slug of the region + where the dedicated inference deployment is running (e.g. "atl1"). + "state": "str", # Optional. Current lifecycle state + of the dedicated inference deployment (e.g. "ACTIVE", + "PROVISIONING"). + "updated_at": "str" # Optional. 
RFC 3339 timestamp + indicating when the dedicated inference deployment was last updated. + } + ], + "architecture": "str", # Optional. Model architecture type + (free-form string from config.json). + "config_json": {}, # Optional. Raw config.json contents from the + model repository. + "context_length": 0, # Optional. Maximum context length supported by + the model. + "cost_estimate_per_month": 0, # Optional. Estimated monthly cost in + dollars for hosting. + "created_at": "2020-02-20 00:00:00", # Optional. Timestamp when the + model was created. + "description": "str", # Optional. Description of the custom model. + "file_count": 0, # Optional. Number of files in the model. + "input_modalities": [ + "str" # Optional. Input modalities supported (e.g., text, + image). + ], + "license": "str", # Optional. License under which the model is + distributed. + "name": "str", # Optional. Name of the custom model. + "output_modalities": [ + "str" # Optional. Output modalities supported (e.g., text, + image). + ], + "parameters": "str", # Optional. Number of parameters in the model. + "source_ref": { + "access_type": "ACCESS_TYPE_UNSPECIFIED", # Optional. + Default value is "ACCESS_TYPE_UNSPECIFIED". Access level required for the + model repository. Known values are: "ACCESS_TYPE_UNSPECIFIED", + "ACCESS_TYPE_PUBLIC", "ACCESS_TYPE_PRIVATE", and "ACCESS_TYPE_GATED". + "bucket": "str", # Optional. Spaces bucket name. + "commit_sha": "str", # Optional. Git commit SHA of the model + version. + "hf_token": "str", # Optional. User-provided HuggingFace + token for gated/private models (not persisted in source_ref). + "prefix": "str", # Optional. Object prefix path in the + bucket. + "region": "str", # Optional. Spaces bucket region. + "repo_id": "str" # Optional. Huggingface repository + identifier. + }, + "source_type": "SOURCE_TYPE_UNSPECIFIED", # Optional. Default value + is "SOURCE_TYPE_UNSPECIFIED". Source from which the model was imported. 
Known + values are: "SOURCE_TYPE_UNSPECIFIED", "SOURCE_TYPE_HUGGINGFACE", + "SOURCE_TYPE_SPACES_BUCKET", "SOURCE_TYPE_SDK_UPLOAD", and + "SOURCE_TYPE_FINE_TUNING". + "status": "STATUS_UNSPECIFIED", # Optional. Default value is + "STATUS_UNSPECIFIED". Import and deployment status of the custom model. Known + values are: "STATUS_UNSPECIFIED", "STATUS_IMPORTING", "STATUS_READY", + "STATUS_FAILED", and "STATUS_DELETED". + "storage_region": "str", # Optional. Region of the Spaces bucket + where model files are stored. + "tags": { + "tags": [ + "str" # Optional. List of tag strings. + ] + }, + "team_id": "str", # Optional. Team that owns the model. + "total_size_bytes": "str", # Optional. Total size of model files in + bytes. + "updated_at": "2020-02-20 00:00:00", # Optional. Timestamp when the + model was last updated. + "uuid": "str" # Optional. Unique identifier for the custom model. + } } # response body for status code(s): 404 response == { @@ -229916,18 +240037,22 @@ async def run_evaluation_test_case( """ @overload - async def run_evaluation_test_case( + async def update_custom_model_metadata( self, + uuid: str, body: Optional[IO[bytes]] = None, *, content_type: str = "application/json", **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Run an Evaluation Test Case. + """Update Custom Model Metadata. - To run an evaluation test case, send a POST request to ``/v2/gen-ai/evaluation_runs``. + To update custom model metadata, send a PATCH request to + ``/v2/gen-ai/custom_models/{uuid}/metadata``. + :param uuid: UUID of the custom model to update. Required. + :type uuid: str :param body: Default value is None. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. @@ -229942,9 +240067,93 @@ async def run_evaluation_test_case( # response body for status code(s): 200 response == { - "evaluation_run_uuids": [ - "str" # Optional. 
- ] + "model": { + "active_deployments": [ + { + "created_at": "str", # Optional. RFC 3339 timestamp + indicating when the dedicated inference deployment was created. + "endpoints": { + "private_endpoint_fqdn": "str", # Optional. + Private FQDN for the deployment. + "public_endpoint_fqdn": "str" # Optional. + Public FQDN for the deployment. + }, + "id": "str", # Optional. Unique identifier (UUID) of + the dedicated inference deployment. + "name": "str", # Optional. Human-readable name of + the dedicated inference deployment. + "region_slug": "str", # Optional. Slug of the region + where the dedicated inference deployment is running (e.g. "atl1"). + "state": "str", # Optional. Current lifecycle state + of the dedicated inference deployment (e.g. "ACTIVE", + "PROVISIONING"). + "updated_at": "str" # Optional. RFC 3339 timestamp + indicating when the dedicated inference deployment was last updated. + } + ], + "architecture": "str", # Optional. Model architecture type + (free-form string from config.json). + "config_json": {}, # Optional. Raw config.json contents from the + model repository. + "context_length": 0, # Optional. Maximum context length supported by + the model. + "cost_estimate_per_month": 0, # Optional. Estimated monthly cost in + dollars for hosting. + "created_at": "2020-02-20 00:00:00", # Optional. Timestamp when the + model was created. + "description": "str", # Optional. Description of the custom model. + "file_count": 0, # Optional. Number of files in the model. + "input_modalities": [ + "str" # Optional. Input modalities supported (e.g., text, + image). + ], + "license": "str", # Optional. License under which the model is + distributed. + "name": "str", # Optional. Name of the custom model. + "output_modalities": [ + "str" # Optional. Output modalities supported (e.g., text, + image). + ], + "parameters": "str", # Optional. Number of parameters in the model. + "source_ref": { + "access_type": "ACCESS_TYPE_UNSPECIFIED", # Optional. 
+ Default value is "ACCESS_TYPE_UNSPECIFIED". Access level required for the + model repository. Known values are: "ACCESS_TYPE_UNSPECIFIED", + "ACCESS_TYPE_PUBLIC", "ACCESS_TYPE_PRIVATE", and "ACCESS_TYPE_GATED". + "bucket": "str", # Optional. Spaces bucket name. + "commit_sha": "str", # Optional. Git commit SHA of the model + version. + "hf_token": "str", # Optional. User-provided HuggingFace + token for gated/private models (not persisted in source_ref). + "prefix": "str", # Optional. Object prefix path in the + bucket. + "region": "str", # Optional. Spaces bucket region. + "repo_id": "str" # Optional. Huggingface repository + identifier. + }, + "source_type": "SOURCE_TYPE_UNSPECIFIED", # Optional. Default value + is "SOURCE_TYPE_UNSPECIFIED". Source from which the model was imported. Known + values are: "SOURCE_TYPE_UNSPECIFIED", "SOURCE_TYPE_HUGGINGFACE", + "SOURCE_TYPE_SPACES_BUCKET", "SOURCE_TYPE_SDK_UPLOAD", and + "SOURCE_TYPE_FINE_TUNING". + "status": "STATUS_UNSPECIFIED", # Optional. Default value is + "STATUS_UNSPECIFIED". Import and deployment status of the custom model. Known + values are: "STATUS_UNSPECIFIED", "STATUS_IMPORTING", "STATUS_READY", + "STATUS_FAILED", and "STATUS_DELETED". + "storage_region": "str", # Optional. Region of the Spaces bucket + where model files are stored. + "tags": { + "tags": [ + "str" # Optional. List of tag strings. + ] + }, + "team_id": "str", # Optional. Team that owns the model. + "total_size_bytes": "str", # Optional. Total size of model files in + bytes. + "updated_at": "2020-02-20 00:00:00", # Optional. Timestamp when the + model was last updated. + "uuid": "str" # Optional. Unique identifier for the custom model. 
+ } } # response body for status code(s): 404 response == { @@ -229960,14 +240169,17 @@ async def run_evaluation_test_case( """ @distributed_trace_async - async def run_evaluation_test_case( - self, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any + async def update_custom_model_metadata( + self, uuid: str, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Run an Evaluation Test Case. + """Update Custom Model Metadata. - To run an evaluation test case, send a POST request to ``/v2/gen-ai/evaluation_runs``. + To update custom model metadata, send a PATCH request to + ``/v2/gen-ai/custom_models/{uuid}/metadata``. + :param uuid: UUID of the custom model to update. Required. + :type uuid: str :param body: Is either a JSON type or a IO[bytes] type. Default value is None. :type body: JSON or IO[bytes] :return: JSON object @@ -229979,23 +240191,105 @@ async def run_evaluation_test_case( # JSON input template you can fill out and use as your body input. body = { - "agent_deployment_names": [ - "str" # Optional. Agent deployment names to run the test case - against (ADK agent workspaces). - ], - "agent_uuids": [ - "str" # Optional. Agent UUIDs to run the test case against (legacy - agents). - ], - "run_name": "str", # Optional. The name of the run. - "test_case_uuid": "str" # Optional. Test-case UUID to run. + "description": "str", # Optional. + "name": "str", # Optional. + "tags": { + "tags": [ + "str" # Optional. List of tag strings. + ] + }, + "uuid": "str" # Optional. UUID of the custom model to update. } # response body for status code(s): 200 response == { - "evaluation_run_uuids": [ - "str" # Optional. - ] + "model": { + "active_deployments": [ + { + "created_at": "str", # Optional. RFC 3339 timestamp + indicating when the dedicated inference deployment was created. + "endpoints": { + "private_endpoint_fqdn": "str", # Optional. + Private FQDN for the deployment. 
+ "public_endpoint_fqdn": "str" # Optional. + Public FQDN for the deployment. + }, + "id": "str", # Optional. Unique identifier (UUID) of + the dedicated inference deployment. + "name": "str", # Optional. Human-readable name of + the dedicated inference deployment. + "region_slug": "str", # Optional. Slug of the region + where the dedicated inference deployment is running (e.g. "atl1"). + "state": "str", # Optional. Current lifecycle state + of the dedicated inference deployment (e.g. "ACTIVE", + "PROVISIONING"). + "updated_at": "str" # Optional. RFC 3339 timestamp + indicating when the dedicated inference deployment was last updated. + } + ], + "architecture": "str", # Optional. Model architecture type + (free-form string from config.json). + "config_json": {}, # Optional. Raw config.json contents from the + model repository. + "context_length": 0, # Optional. Maximum context length supported by + the model. + "cost_estimate_per_month": 0, # Optional. Estimated monthly cost in + dollars for hosting. + "created_at": "2020-02-20 00:00:00", # Optional. Timestamp when the + model was created. + "description": "str", # Optional. Description of the custom model. + "file_count": 0, # Optional. Number of files in the model. + "input_modalities": [ + "str" # Optional. Input modalities supported (e.g., text, + image). + ], + "license": "str", # Optional. License under which the model is + distributed. + "name": "str", # Optional. Name of the custom model. + "output_modalities": [ + "str" # Optional. Output modalities supported (e.g., text, + image). + ], + "parameters": "str", # Optional. Number of parameters in the model. + "source_ref": { + "access_type": "ACCESS_TYPE_UNSPECIFIED", # Optional. + Default value is "ACCESS_TYPE_UNSPECIFIED". Access level required for the + model repository. Known values are: "ACCESS_TYPE_UNSPECIFIED", + "ACCESS_TYPE_PUBLIC", "ACCESS_TYPE_PRIVATE", and "ACCESS_TYPE_GATED". + "bucket": "str", # Optional. Spaces bucket name. 
+ "commit_sha": "str", # Optional. Git commit SHA of the model + version. + "hf_token": "str", # Optional. User-provided HuggingFace + token for gated/private models (not persisted in source_ref). + "prefix": "str", # Optional. Object prefix path in the + bucket. + "region": "str", # Optional. Spaces bucket region. + "repo_id": "str" # Optional. Huggingface repository + identifier. + }, + "source_type": "SOURCE_TYPE_UNSPECIFIED", # Optional. Default value + is "SOURCE_TYPE_UNSPECIFIED". Source from which the model was imported. Known + values are: "SOURCE_TYPE_UNSPECIFIED", "SOURCE_TYPE_HUGGINGFACE", + "SOURCE_TYPE_SPACES_BUCKET", "SOURCE_TYPE_SDK_UPLOAD", and + "SOURCE_TYPE_FINE_TUNING". + "status": "STATUS_UNSPECIFIED", # Optional. Default value is + "STATUS_UNSPECIFIED". Import and deployment status of the custom model. Known + values are: "STATUS_UNSPECIFIED", "STATUS_IMPORTING", "STATUS_READY", + "STATUS_FAILED", and "STATUS_DELETED". + "storage_region": "str", # Optional. Region of the Spaces bucket + where model files are stored. + "tags": { + "tags": [ + "str" # Optional. List of tag strings. + ] + }, + "team_id": "str", # Optional. Team that owns the model. + "total_size_bytes": "str", # Optional. Total size of model files in + bytes. + "updated_at": "2020-02-20 00:00:00", # Optional. Timestamp when the + model was last updated. + "uuid": "str" # Optional. Unique identifier for the custom model. 
+ } } # response body for status code(s): 404 response == { @@ -230041,7 +240335,8 @@ async def run_evaluation_test_case( else: _json = None - _request = build_genai_run_evaluation_test_case_request( + _request = build_genai_update_custom_model_metadata_request( + uuid=uuid, content_type=content_type, json=_json, content=_content, @@ -230103,16 +240398,116 @@ async def run_evaluation_test_case( return cast(JSON, deserialized) # type: ignore + @overload + async def create_evaluation_dataset( + self, + body: Optional[JSON] = None, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Create Evaluation Dataset. + + To create an evaluation dataset, send a POST request to ``/v2/gen-ai/evaluation_datasets``. + + :param body: Default value is None. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "dataset_type": "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and "EVALUATION_DATASET_TYPE_MODEL". + "file_upload_dataset": { + "original_file_name": "str", # Optional. The original file name. + "size_in_bytes": "str", # Optional. The size of the file in bytes. + "stored_object_key": "str" # Optional. The object key the file was + stored as. + }, + "name": "str" # Optional. The name of the agent evaluation dataset. + } + + # response body for status code(s): 200 + response == { + "evaluation_dataset_uuid": "str" # Optional. Evaluation dataset uuid. 
+ } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + async def create_evaluation_dataset( + self, + body: Optional[IO[bytes]] = None, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Create Evaluation Dataset. + + To create an evaluation dataset, send a POST request to ``/v2/gen-ai/evaluation_datasets``. + + :param body: Default value is None. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "evaluation_dataset_uuid": "str" # Optional. Evaluation dataset uuid. + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + @distributed_trace_async - async def get_evaluation_run(self, evaluation_run_uuid: str, **kwargs: Any) -> JSON: + async def create_evaluation_dataset( + self, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """Retrieve Information About an Existing Evaluation Run. + """Create Evaluation Dataset. - To retrive information about an existing evaluation run, send a GET request to - ``/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}``. + To create an evaluation dataset, send a POST request to ``/v2/gen-ai/evaluation_datasets``. - :param evaluation_run_uuid: Evaluation run UUID. Required. - :type evaluation_run_uuid: str + :param body: Is either a JSON type or a IO[bytes] type. Default value is None. + :type body: JSON or IO[bytes] :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -230120,73 +240515,24 @@ async def get_evaluation_run(self, evaluation_run_uuid: str, **kwargs: Any) -> J Example: .. code-block:: python + # JSON input template you can fill out and use as your body input. + body = { + "dataset_type": "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and "EVALUATION_DATASET_TYPE_MODEL". + "file_upload_dataset": { + "original_file_name": "str", # Optional. The original file name. + "size_in_bytes": "str", # Optional. The size of the file in bytes. + "stored_object_key": "str" # Optional. The object key the file was + stored as. + }, + "name": "str" # Optional. The name of the agent evaluation dataset. + } + # response body for status code(s): 200 response == { - "evaluation_run": { - "agent_deleted": bool, # Optional. Whether agent is deleted. - "agent_deployment_name": "str", # Optional. The agent deployment - name. - "agent_name": "str", # Optional. Agent name. 
- "agent_uuid": "str", # Optional. Agent UUID. - "agent_version_hash": "str", # Optional. Version hash. - "agent_workspace_uuid": "str", # Optional. Agent workspace uuid. - "created_by_user_email": "str", # Optional. - "created_by_user_id": "str", # Optional. - "error_description": "str", # Optional. The error description. - "evaluation_run_uuid": "str", # Optional. Evaluation run UUID. - "evaluation_test_case_workspace_uuid": "str", # Optional. Evaluation - test case workspace uuid. - "finished_at": "2020-02-20 00:00:00", # Optional. Run end time. - "pass_status": bool, # Optional. The pass status of the evaluation - run based on the star metric. - "queued_at": "2020-02-20 00:00:00", # Optional. Run queued time. - "run_level_metric_results": [ - { - "error_description": "str", # Optional. Error - description if the metric could not be calculated. - "metric_name": "str", # Optional. Metric name. - "metric_value_type": "METRIC_VALUE_TYPE_UNSPECIFIED", - # Optional. Default value is "METRIC_VALUE_TYPE_UNSPECIFIED". Known - values are: "METRIC_VALUE_TYPE_UNSPECIFIED", - "METRIC_VALUE_TYPE_NUMBER", "METRIC_VALUE_TYPE_STRING", and - "METRIC_VALUE_TYPE_PERCENTAGE". - "number_value": 0.0, # Optional. The value of the - metric as a number. - "reasoning": "str", # Optional. Reasoning of the - metric result. - "string_value": "str" # Optional. The value of the - metric as a string. - } - ], - "run_name": "str", # Optional. Run name. - "star_metric_result": { - "error_description": "str", # Optional. Error description if - the metric could not be calculated. - "metric_name": "str", # Optional. Metric name. - "metric_value_type": "METRIC_VALUE_TYPE_UNSPECIFIED", # - Optional. Default value is "METRIC_VALUE_TYPE_UNSPECIFIED". Known values - are: "METRIC_VALUE_TYPE_UNSPECIFIED", "METRIC_VALUE_TYPE_NUMBER", - "METRIC_VALUE_TYPE_STRING", and "METRIC_VALUE_TYPE_PERCENTAGE". - "number_value": 0.0, # Optional. The value of the metric as - a number. - "reasoning": "str", # Optional. 
Reasoning of the metric - result. - "string_value": "str" # Optional. The value of the metric as - a string. - }, - "started_at": "2020-02-20 00:00:00", # Optional. Run start time. - "status": "EVALUATION_RUN_STATUS_UNSPECIFIED", # Optional. Default - value is "EVALUATION_RUN_STATUS_UNSPECIFIED". Evaluation Run Statuses. Known - values are: "EVALUATION_RUN_STATUS_UNSPECIFIED", "EVALUATION_RUN_QUEUED", - "EVALUATION_RUN_RUNNING_DATASET", "EVALUATION_RUN_EVALUATING_RESULTS", - "EVALUATION_RUN_CANCELLING", "EVALUATION_RUN_CANCELLED", - "EVALUATION_RUN_SUCCESSFUL", "EVALUATION_RUN_PARTIALLY_SUCCESSFUL", and - "EVALUATION_RUN_FAILED". - "test_case_description": "str", # Optional. Test case description. - "test_case_name": "str", # Optional. Test case name. - "test_case_uuid": "str", # Optional. Test-case UUID. - "test_case_version": 0 # Optional. Test-case-version. - } + "evaluation_dataset_uuid": "str" # Optional. Evaluation dataset uuid. } # response body for status code(s): 404 response == { @@ -230213,13 +240559,29 @@ async def get_evaluation_run(self, evaluation_run_uuid: str, **kwargs: Any) -> J } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_genai_get_evaluation_run_request( - evaluation_run_uuid=evaluation_run_uuid, + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + if body is not None: + _json = body + else: + _json = None + + _request = build_genai_create_evaluation_dataset_request( + content_type=content_type, + json=_json, + content=_content, headers=_headers, params=_params, ) @@ -230278,27 +240640,25 @@ async def get_evaluation_run(self, 
evaluation_run_uuid: str, **kwargs: Any) -> J return cast(JSON, deserialized) # type: ignore - @distributed_trace_async - async def get_evaluation_run_results( + @overload + async def create_evaluation_dataset_file_upload_presigned_urls( # pylint: disable=name-too-long self, - evaluation_run_uuid: str, + body: Optional[JSON] = None, *, - page: Optional[int] = None, - per_page: Optional[int] = None, + content_type: str = "application/json", **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Retrieve Results of an Evaluation Run. + """Create Presigned URLs for Evaluation Dataset File Upload. - To retrieve results of an evaluation run, send a GET request to - ``/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results``. + To create presigned URLs for evaluation dataset file upload, send a POST request to + ``/v2/gen-ai/evaluation_datasets/file_upload_presigned_urls``. - :param evaluation_run_uuid: Evaluation run UUID. Required. - :type evaluation_run_uuid: str - :keyword page: Page number. Default value is None. - :paramtype page: int - :keyword per_page: Items per page. Default value is None. - :paramtype per_page: int + :param body: Default value is None. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -230306,184 +240666,147 @@ async def get_evaluation_run_results( Example: .. code-block:: python + # JSON input template you can fill out and use as your body input. + body = { + "files": [ + { + "file_name": "str", # Optional. Local filename. + "file_size": "str" # Optional. The size of the file in + bytes. + } + ] + } + # response body for status code(s): 200 response == { - "evaluation_run": { - "agent_deleted": bool, # Optional. Whether agent is deleted. - "agent_deployment_name": "str", # Optional. The agent deployment - name. 
- "agent_name": "str", # Optional. Agent name. - "agent_uuid": "str", # Optional. Agent UUID. - "agent_version_hash": "str", # Optional. Version hash. - "agent_workspace_uuid": "str", # Optional. Agent workspace uuid. - "created_by_user_email": "str", # Optional. - "created_by_user_id": "str", # Optional. - "error_description": "str", # Optional. The error description. - "evaluation_run_uuid": "str", # Optional. Evaluation run UUID. - "evaluation_test_case_workspace_uuid": "str", # Optional. Evaluation - test case workspace uuid. - "finished_at": "2020-02-20 00:00:00", # Optional. Run end time. - "pass_status": bool, # Optional. The pass status of the evaluation - run based on the star metric. - "queued_at": "2020-02-20 00:00:00", # Optional. Run queued time. - "run_level_metric_results": [ - { - "error_description": "str", # Optional. Error - description if the metric could not be calculated. - "metric_name": "str", # Optional. Metric name. - "metric_value_type": "METRIC_VALUE_TYPE_UNSPECIFIED", - # Optional. Default value is "METRIC_VALUE_TYPE_UNSPECIFIED". Known - values are: "METRIC_VALUE_TYPE_UNSPECIFIED", - "METRIC_VALUE_TYPE_NUMBER", "METRIC_VALUE_TYPE_STRING", and - "METRIC_VALUE_TYPE_PERCENTAGE". - "number_value": 0.0, # Optional. The value of the - metric as a number. - "reasoning": "str", # Optional. Reasoning of the - metric result. - "string_value": "str" # Optional. The value of the - metric as a string. - } - ], - "run_name": "str", # Optional. Run name. - "star_metric_result": { - "error_description": "str", # Optional. Error description if - the metric could not be calculated. - "metric_name": "str", # Optional. Metric name. - "metric_value_type": "METRIC_VALUE_TYPE_UNSPECIFIED", # - Optional. Default value is "METRIC_VALUE_TYPE_UNSPECIFIED". Known values - are: "METRIC_VALUE_TYPE_UNSPECIFIED", "METRIC_VALUE_TYPE_NUMBER", - "METRIC_VALUE_TYPE_STRING", and "METRIC_VALUE_TYPE_PERCENTAGE". - "number_value": 0.0, # Optional. 
The value of the metric as - a number. - "reasoning": "str", # Optional. Reasoning of the metric - result. - "string_value": "str" # Optional. The value of the metric as - a string. - }, - "started_at": "2020-02-20 00:00:00", # Optional. Run start time. - "status": "EVALUATION_RUN_STATUS_UNSPECIFIED", # Optional. Default - value is "EVALUATION_RUN_STATUS_UNSPECIFIED". Evaluation Run Statuses. Known - values are: "EVALUATION_RUN_STATUS_UNSPECIFIED", "EVALUATION_RUN_QUEUED", - "EVALUATION_RUN_RUNNING_DATASET", "EVALUATION_RUN_EVALUATING_RESULTS", - "EVALUATION_RUN_CANCELLING", "EVALUATION_RUN_CANCELLED", - "EVALUATION_RUN_SUCCESSFUL", "EVALUATION_RUN_PARTIALLY_SUCCESSFUL", and - "EVALUATION_RUN_FAILED". - "test_case_description": "str", # Optional. Test case description. - "test_case_name": "str", # Optional. Test case name. - "test_case_uuid": "str", # Optional. Test-case UUID. - "test_case_version": 0 # Optional. Test-case-version. - }, - "links": { - "pages": { - "first": "str", # Optional. First page. - "last": "str", # Optional. Last page. - "next": "str", # Optional. Next page. - "previous": "str" # Optional. Previous page. + "request_id": "str", # Optional. The ID generated for the request for + Presigned URLs. + "uploads": [ + { + "expires_at": "2020-02-20 00:00:00", # Optional. The time + the url expires at. + "object_key": "str", # Optional. The unique object key to + store the file as. + "original_file_name": "str", # Optional. The original file + name. + "presigned_url": "str" # Optional. The actual presigned URL + the client can use to upload the file directly. } - }, - "meta": { - "page": 0, # Optional. The current page. - "pages": 0, # Optional. Total number of pages. - "total": 0 # Optional. Total amount of items over all pages. - }, - "prompts": [ + ] + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. 
For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + async def create_evaluation_dataset_file_upload_presigned_urls( # pylint: disable=name-too-long + self, + body: Optional[IO[bytes]] = None, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Create Presigned URLs for Evaluation Dataset File Upload. + + To create presigned URLs for evaluation dataset file upload, send a POST request to + ``/v2/gen-ai/evaluation_datasets/file_upload_presigned_urls``. + + :param body: Default value is None. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "request_id": "str", # Optional. The ID generated for the request for + Presigned URLs. + "uploads": [ { - "evaluation_trace_spans": [ - { - "created_at": "2020-02-20 00:00:00", # - Optional. When the span was created. - "input": {}, # Optional. Input data for the - span (flexible structure - can be messages array, string, etc.). - "name": "str", # Optional. Name/identifier - for the span. - "output": {}, # Optional. Output data from - the span (flexible structure - can be message, string, etc.). - "retriever_chunks": [ - { - "chunk_usage_pct": 0.0, # - Optional. The usage percentage of the chunk. - "chunk_used": bool, # - Optional. 
Indicates if the chunk was used in the prompt. - "index_uuid": "str", # - Optional. The index uuid (Knowledge Base) of the chunk. - "source_name": "str", # - Optional. The source name for the chunk, e.g., the file - name or document title. - "text": "str" # Optional. - Text content of the chunk. - } - ], - "span_level_metric_results": [ - { - "error_description": "str", - # Optional. Error description if the metric could not be - calculated. - "metric_name": "str", # - Optional. Metric name. - "metric_value_type": - "METRIC_VALUE_TYPE_UNSPECIFIED", # Optional. Default - value is "METRIC_VALUE_TYPE_UNSPECIFIED". Known values - are: "METRIC_VALUE_TYPE_UNSPECIFIED", - "METRIC_VALUE_TYPE_NUMBER", "METRIC_VALUE_TYPE_STRING", - and "METRIC_VALUE_TYPE_PERCENTAGE". - "number_value": 0.0, # - Optional. The value of the metric as a number. - "reasoning": "str", # - Optional. Reasoning of the metric result. - "string_value": "str" # - Optional. The value of the metric as a string. - } - ], - "type": "TRACE_SPAN_TYPE_UNKNOWN" # - Optional. Default value is "TRACE_SPAN_TYPE_UNKNOWN". Types of - spans in a trace. Known values are: "TRACE_SPAN_TYPE_UNKNOWN", - "TRACE_SPAN_TYPE_LLM", "TRACE_SPAN_TYPE_RETRIEVER", and - "TRACE_SPAN_TYPE_TOOL". - } - ], - "ground_truth": "str", # Optional. The ground truth for the - prompt. - "input": "str", # Optional. The prompt level results. - "input_tokens": "str", # Optional. The number of input - tokens used in the prompt. - "output": "str", # Optional. The prompt level results. - "output_tokens": "str", # Optional. The number of output - tokens used in the prompt. - "prompt_chunks": [ - { - "chunk_usage_pct": 0.0, # Optional. The - usage percentage of the chunk. - "chunk_used": bool, # Optional. Indicates if - the chunk was used in the prompt. - "index_uuid": "str", # Optional. The index - uuid (Knowledge Base) of the chunk. - "source_name": "str", # Optional. The source - name for the chunk, e.g., the file name or document title. 
- "text": "str" # Optional. Text content of - the chunk. - } - ], - "prompt_id": 0, # Optional. Prompt ID. - "prompt_level_metric_results": [ - { - "error_description": "str", # Optional. - Error description if the metric could not be calculated. - "metric_name": "str", # Optional. Metric - name. - "metric_value_type": - "METRIC_VALUE_TYPE_UNSPECIFIED", # Optional. Default value is - "METRIC_VALUE_TYPE_UNSPECIFIED". Known values are: - "METRIC_VALUE_TYPE_UNSPECIFIED", "METRIC_VALUE_TYPE_NUMBER", - "METRIC_VALUE_TYPE_STRING", and "METRIC_VALUE_TYPE_PERCENTAGE". - "number_value": 0.0, # Optional. The value - of the metric as a number. - "reasoning": "str", # Optional. Reasoning of - the metric result. - "string_value": "str" # Optional. The value - of the metric as a string. - } - ], - "trace_id": "str" # Optional. The trace id for the prompt. + "expires_at": "2020-02-20 00:00:00", # Optional. The time + the url expires at. + "object_key": "str", # Optional. The unique object key to + store the file as. + "original_file_name": "str", # Optional. The original file + name. + "presigned_url": "str" # Optional. The actual presigned URL + the client can use to upload the file directly. + } + ] + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + + @distributed_trace_async + async def create_evaluation_dataset_file_upload_presigned_urls( # pylint: disable=name-too-long + self, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Create Presigned URLs for Evaluation Dataset File Upload. + + To create presigned URLs for evaluation dataset file upload, send a POST request to + ``/v2/gen-ai/evaluation_datasets/file_upload_presigned_urls``. + + :param body: Is either a JSON type or a IO[bytes] type. Default value is None. + :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "files": [ + { + "file_name": "str", # Optional. Local filename. + "file_size": "str" # Optional. The size of the file in + bytes. + } + ] + } + + # response body for status code(s): 200 + response == { + "request_id": "str", # Optional. The ID generated for the request for + Presigned URLs. + "uploads": [ + { + "expires_at": "2020-02-20 00:00:00", # Optional. The time + the url expires at. + "object_key": "str", # Optional. The unique object key to + store the file as. + "original_file_name": "str", # Optional. The original file + name. + "presigned_url": "str" # Optional. The actual presigned URL + the client can use to upload the file directly. 
} ] } @@ -230512,17 +240835,33 @@ async def get_evaluation_run_results( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_genai_get_evaluation_run_results_request( - evaluation_run_uuid=evaluation_run_uuid, - page=page, - per_page=per_page, - headers=_headers, - params=_params, + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + if body is not None: + _json = body + else: + _json = None + + _request = ( + build_genai_create_evaluation_dataset_file_upload_presigned_urls_request( + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) ) _request.url = self._client.format_url(_request.url) @@ -230580,19 +240919,17 @@ async def get_evaluation_run_results( return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def get_evaluation_run_prompt_results( - self, evaluation_run_uuid: str, prompt_id: int, **kwargs: Any + async def get_evaluation_dataset_download_url( + self, dataset_uuid: str, **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Retrieve Results of an Evaluation Run Prompt. + """Get Download URL for Evaluation Dataset. - To retrieve results of an evaluation run, send a GET request to - ``/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}``. + To get a presigned download URL for an evaluation dataset, send a GET request to + ``/v2/genai/evaluation_datasets/{dataset_uuid}/download_url``. - :param evaluation_run_uuid: Evaluation run UUID. Required. - :type evaluation_run_uuid: str - :param prompt_id: Prompt ID to get results for. Required. 
- :type prompt_id: int + :param dataset_uuid: UUID of the evaluation dataset. Required. + :type dataset_uuid: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -230602,101 +240939,9 @@ async def get_evaluation_run_prompt_results( # response body for status code(s): 200 response == { - "prompt": { - "evaluation_trace_spans": [ - { - "created_at": "2020-02-20 00:00:00", # Optional. - When the span was created. - "input": {}, # Optional. Input data for the span - (flexible structure - can be messages array, string, etc.). - "name": "str", # Optional. Name/identifier for the - span. - "output": {}, # Optional. Output data from the span - (flexible structure - can be message, string, etc.). - "retriever_chunks": [ - { - "chunk_usage_pct": 0.0, # Optional. - The usage percentage of the chunk. - "chunk_used": bool, # Optional. - Indicates if the chunk was used in the prompt. - "index_uuid": "str", # Optional. The - index uuid (Knowledge Base) of the chunk. - "source_name": "str", # Optional. - The source name for the chunk, e.g., the file name or - document title. - "text": "str" # Optional. Text - content of the chunk. - } - ], - "span_level_metric_results": [ - { - "error_description": "str", # - Optional. Error description if the metric could not be - calculated. - "metric_name": "str", # Optional. - Metric name. - "metric_value_type": - "METRIC_VALUE_TYPE_UNSPECIFIED", # Optional. Default value - is "METRIC_VALUE_TYPE_UNSPECIFIED". Known values are: - "METRIC_VALUE_TYPE_UNSPECIFIED", "METRIC_VALUE_TYPE_NUMBER", - "METRIC_VALUE_TYPE_STRING", and - "METRIC_VALUE_TYPE_PERCENTAGE". - "number_value": 0.0, # Optional. The - value of the metric as a number. - "reasoning": "str", # Optional. - Reasoning of the metric result. - "string_value": "str" # Optional. - The value of the metric as a string. - } - ], - "type": "TRACE_SPAN_TYPE_UNKNOWN" # Optional. - Default value is "TRACE_SPAN_TYPE_UNKNOWN". Types of spans in a - trace. 
Known values are: "TRACE_SPAN_TYPE_UNKNOWN", - "TRACE_SPAN_TYPE_LLM", "TRACE_SPAN_TYPE_RETRIEVER", and - "TRACE_SPAN_TYPE_TOOL". - } - ], - "ground_truth": "str", # Optional. The ground truth for the prompt. - "input": "str", # Optional. - "input_tokens": "str", # Optional. The number of input tokens used - in the prompt. - "output": "str", # Optional. - "output_tokens": "str", # Optional. The number of output tokens used - in the prompt. - "prompt_chunks": [ - { - "chunk_usage_pct": 0.0, # Optional. The usage - percentage of the chunk. - "chunk_used": bool, # Optional. Indicates if the - chunk was used in the prompt. - "index_uuid": "str", # Optional. The index uuid - (Knowledge Base) of the chunk. - "source_name": "str", # Optional. The source name - for the chunk, e.g., the file name or document title. - "text": "str" # Optional. Text content of the chunk. - } - ], - "prompt_id": 0, # Optional. Prompt ID. - "prompt_level_metric_results": [ - { - "error_description": "str", # Optional. Error - description if the metric could not be calculated. - "metric_name": "str", # Optional. Metric name. - "metric_value_type": "METRIC_VALUE_TYPE_UNSPECIFIED", - # Optional. Default value is "METRIC_VALUE_TYPE_UNSPECIFIED". Known - values are: "METRIC_VALUE_TYPE_UNSPECIFIED", - "METRIC_VALUE_TYPE_NUMBER", "METRIC_VALUE_TYPE_STRING", and - "METRIC_VALUE_TYPE_PERCENTAGE". - "number_value": 0.0, # Optional. The value of the - metric as a number. - "reasoning": "str", # Optional. Reasoning of the - metric result. - "string_value": "str" # Optional. The value of the - metric as a string. - } - ], - "trace_id": "str" # Optional. The trace id for the prompt. - } + "download_url": "str", # Optional. The presigned URL to download the dataset + file. + "expires_at": "2020-02-20 00:00:00" # Optional. The time the URL expires at. 
} # response body for status code(s): 404 response == { @@ -230728,9 +240973,8 @@ async def get_evaluation_run_prompt_results( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_genai_get_evaluation_run_prompt_results_request( - evaluation_run_uuid=evaluation_run_uuid, - prompt_id=prompt_id, + _request = build_genai_get_evaluation_dataset_download_url_request( + dataset_uuid=dataset_uuid, headers=_headers, params=_params, ) @@ -230790,11 +241034,11 @@ async def get_evaluation_run_prompt_results( return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def list_evaluation_test_cases(self, **kwargs: Any) -> JSON: + async def list_evaluation_metrics(self, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """List Evaluation Test Cases. + """List Evaluation Metrics. - To list all evaluation test cases, send a GET request to ``/v2/gen-ai/evaluation_test_cases``. + To list all evaluation metrics, send a GET request to ``/v2/gen-ai/evaluation_metrics``. :return: JSON object :rtype: JSON @@ -230805,120 +241049,40 @@ async def list_evaluation_test_cases(self, **kwargs: Any) -> JSON: # response body for status code(s): 200 response == { - "evaluation_test_cases": [ + "metrics": [ { - "archived_at": "2020-02-20 00:00:00", # Optional. - Alternative way of authentication for internal usage only - should not be - exposed to public api. - "created_at": "2020-02-20 00:00:00", # Optional. Alternative - way of authentication for internal usage only - should not be exposed to - public api. - "created_by_user_email": "str", # Optional. Alternative way - of authentication for internal usage only - should not be exposed to - public api. - "created_by_user_id": "str", # Optional. Alternative way of - authentication for internal usage only - should not be exposed to public - api. - "dataset": { - "created_at": "2020-02-20 00:00:00", # Optional. - Time created at. - "dataset_name": "str", # Optional. Name of the - dataset. 
- "dataset_uuid": "str", # Optional. UUID of the - dataset. - "file_size": "str", # Optional. The size of the - dataset uploaded file in bytes. - "has_ground_truth": bool, # Optional. Does the - dataset have a ground truth column?. - "row_count": 0 # Optional. Number of rows in the - dataset. - }, - "dataset_name": "str", # Optional. Alternative way of - authentication for internal usage only - should not be exposed to public - api. - "dataset_uuid": "str", # Optional. Alternative way of - authentication for internal usage only - should not be exposed to public - api. - "description": "str", # Optional. Alternative way of - authentication for internal usage only - should not be exposed to public - api. - "latest_version_number_of_runs": 0, # Optional. Alternative - way of authentication for internal usage only - should not be exposed to - public api. - "metrics": [ - { - "category": "METRIC_CATEGORY_UNSPECIFIED", # - Optional. Default value is "METRIC_CATEGORY_UNSPECIFIED". Known - values are: "METRIC_CATEGORY_UNSPECIFIED", - "METRIC_CATEGORY_CORRECTNESS", "METRIC_CATEGORY_USER_OUTCOMES", - "METRIC_CATEGORY_SAFETY_AND_SECURITY", - "METRIC_CATEGORY_CONTEXT_QUALITY", and - "METRIC_CATEGORY_MODEL_FIT". - "description": "str", # Optional. - Alternative way of authentication for internal usage only - - should not be exposed to public api. - "inverted": bool, # Optional. If true, the - metric is inverted, meaning that a lower value is better. - "is_metric_goal": bool, # Optional. - Alternative way of authentication for internal usage only - - should not be exposed to public api. - "metric_name": "str", # Optional. - Alternative way of authentication for internal usage only - - should not be exposed to public api. - "metric_rank": 0, # Optional. Alternative - way of authentication for internal usage only - should not be - exposed to public api. - "metric_type": "METRIC_TYPE_UNSPECIFIED", # - Optional. Default value is "METRIC_TYPE_UNSPECIFIED". 
Known - values are: "METRIC_TYPE_UNSPECIFIED", - "METRIC_TYPE_GENERAL_QUALITY", and "METRIC_TYPE_RAG_AND_TOOL". - "metric_uuid": "str", # Optional. - Alternative way of authentication for internal usage only - - should not be exposed to public api. - "metric_value_type": - "METRIC_VALUE_TYPE_UNSPECIFIED", # Optional. Default value is - "METRIC_VALUE_TYPE_UNSPECIFIED". Known values are: - "METRIC_VALUE_TYPE_UNSPECIFIED", "METRIC_VALUE_TYPE_NUMBER", - "METRIC_VALUE_TYPE_STRING", and "METRIC_VALUE_TYPE_PERCENTAGE". - "range_max": 0.0, # Optional. The maximum - value for the metric. - "range_min": 0.0 # Optional. The minimum - value for the metric. - } - ], - "name": "str", # Optional. Alternative way of authentication - for internal usage only - should not be exposed to public api. - "star_metric": { - "metric_uuid": "str", # Optional. Alternative way of - authentication for internal usage only - should not be exposed to - public api. - "name": "str", # Optional. Alternative way of - authentication for internal usage only - should not be exposed to - public api. - "success_threshold": 0.0, # Optional. The success - threshold for the star metric. This is a value that the metric must - reach to be considered successful. - "success_threshold_pct": 0 # Optional. The success - threshold for the star metric. This is a percentage value between 0 - and 100. - }, - "test_case_uuid": "str", # Optional. Alternative way of - authentication for internal usage only - should not be exposed to public - api. - "total_runs": 0, # Optional. Alternative way of - authentication for internal usage only - should not be exposed to public - api. - "updated_at": "2020-02-20 00:00:00", # Optional. Alternative - way of authentication for internal usage only - should not be exposed to - public api. - "updated_by_user_email": "str", # Optional. Alternative way - of authentication for internal usage only - should not be exposed to - public api. - "updated_by_user_id": "str", # Optional. 
Alternative way of - authentication for internal usage only - should not be exposed to public - api. - "version": 0 # Optional. Alternative way of authentication - for internal usage only - should not be exposed to public api. + "category": "METRIC_CATEGORY_UNSPECIFIED", # Optional. + Default value is "METRIC_CATEGORY_UNSPECIFIED". Known values are: + "METRIC_CATEGORY_UNSPECIFIED", "METRIC_CATEGORY_CORRECTNESS", + "METRIC_CATEGORY_USER_OUTCOMES", "METRIC_CATEGORY_SAFETY_AND_SECURITY", + "METRIC_CATEGORY_CONTEXT_QUALITY", and "METRIC_CATEGORY_MODEL_FIT". + "description": "str", # Optional. + "evaluation_scope": "EVALUATION_SCOPE_UNSPECIFIED", # + Optional. Default value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation or model + evaluation. For backwards compatibility, UNSPECIFIED defaults to agent + metrics only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". + "inverted": bool, # Optional. If true, the metric is + inverted, meaning that a lower value is better. + "is_metric_goal": bool, # Optional. + "metric_name": "str", # Optional. + "metric_rank": 0, # Optional. + "metric_type": "METRIC_TYPE_UNSPECIFIED", # Optional. + Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: + "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", and + "METRIC_TYPE_MODEL_SAFETY". + "metric_uuid": "str", # Optional. + "metric_value_type": "METRIC_VALUE_TYPE_UNSPECIFIED", # + Optional. Default value is "METRIC_VALUE_TYPE_UNSPECIFIED". Known values + are: "METRIC_VALUE_TYPE_UNSPECIFIED", "METRIC_VALUE_TYPE_NUMBER", + "METRIC_VALUE_TYPE_STRING", and "METRIC_VALUE_TYPE_PERCENTAGE". + "range_max": 0.0, # Optional. The maximum value for the + metric. + "range_min": 0.0 # Optional. The minimum value for the + metric. 
} ] } @@ -230952,7 +241116,7 @@ async def list_evaluation_test_cases(self, **kwargs: Any) -> JSON: cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_genai_list_evaluation_test_cases_request( + _request = build_genai_list_evaluation_metrics_request( headers=_headers, params=_params, ) @@ -231012,7 +241176,7 @@ async def list_evaluation_test_cases(self, **kwargs: Any) -> JSON: return cast(JSON, deserialized) # type: ignore @overload - async def create_evaluation_test_case( + async def run_evaluation_test_case( self, body: Optional[JSON] = None, *, @@ -231020,9 +241184,9 @@ async def create_evaluation_test_case( **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Create Evaluation Test Case. + """Run an Evaluation Test Case. - To create an evaluation test-case send a POST request to ``/v2/gen-ai/evaluation_test_cases``. + To run an evaluation test case, send a POST request to ``/v2/gen-ai/evaluation_runs``. :param body: Default value is None. :type body: JSON @@ -231038,29 +241202,23 @@ async def create_evaluation_test_case( # JSON input template you can fill out and use as your body input. body = { - "agent_workspace_name": "str", # Optional. - "dataset_uuid": "str", # Optional. Dataset against which the test"u2011case - is executed. - "description": "str", # Optional. Description of the test case. - "metrics": [ - "str" # Optional. Full metric list to use for evaluation test case. + "agent_deployment_names": [ + "str" # Optional. Agent deployment names to run the test case + against (ADK agent workspaces). ], - "name": "str", # Optional. Name of the test case. - "star_metric": { - "metric_uuid": "str", # Optional. - "name": "str", # Optional. - "success_threshold": 0.0, # Optional. The success threshold for the - star metric. This is a value that the metric must reach to be considered - successful. - "success_threshold_pct": 0 # Optional. The success threshold for the - star metric. This is a percentage value between 0 and 100. 
- }, - "workspace_uuid": "str" # Optional. The workspace uuid. + "agent_uuids": [ + "str" # Optional. Agent UUIDs to run the test case against (legacy + agents). + ], + "run_name": "str", # Optional. The name of the run. + "test_case_uuid": "str" # Optional. Test-case UUID to run. } # response body for status code(s): 200 response == { - "test_case_uuid": "str" # Optional. Test"u2011case UUID. + "evaluation_run_uuids": [ + "str" # Optional. + ] } # response body for status code(s): 404 response == { @@ -231076,7 +241234,7 @@ async def create_evaluation_test_case( """ @overload - async def create_evaluation_test_case( + async def run_evaluation_test_case( self, body: Optional[IO[bytes]] = None, *, @@ -231084,9 +241242,9 @@ async def create_evaluation_test_case( **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Create Evaluation Test Case. + """Run an Evaluation Test Case. - To create an evaluation test-case send a POST request to ``/v2/gen-ai/evaluation_test_cases``. + To run an evaluation test case, send a POST request to ``/v2/gen-ai/evaluation_runs``. :param body: Default value is None. :type body: IO[bytes] @@ -231102,7 +241260,9 @@ async def create_evaluation_test_case( # response body for status code(s): 200 response == { - "test_case_uuid": "str" # Optional. Test"u2011case UUID. + "evaluation_run_uuids": [ + "str" # Optional. + ] } # response body for status code(s): 404 response == { @@ -231118,13 +241278,13 @@ async def create_evaluation_test_case( """ @distributed_trace_async - async def create_evaluation_test_case( + async def run_evaluation_test_case( self, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Create Evaluation Test Case. + """Run an Evaluation Test Case. - To create an evaluation test-case send a POST request to ``/v2/gen-ai/evaluation_test_cases``. + To run an evaluation test case, send a POST request to ``/v2/gen-ai/evaluation_runs``. 
:param body: Is either a JSON type or a IO[bytes] type. Default value is None. :type body: JSON or IO[bytes] @@ -231137,29 +241297,23 @@ async def create_evaluation_test_case( # JSON input template you can fill out and use as your body input. body = { - "agent_workspace_name": "str", # Optional. - "dataset_uuid": "str", # Optional. Dataset against which the test"u2011case - is executed. - "description": "str", # Optional. Description of the test case. - "metrics": [ - "str" # Optional. Full metric list to use for evaluation test case. + "agent_deployment_names": [ + "str" # Optional. Agent deployment names to run the test case + against (ADK agent workspaces). ], - "name": "str", # Optional. Name of the test case. - "star_metric": { - "metric_uuid": "str", # Optional. - "name": "str", # Optional. - "success_threshold": 0.0, # Optional. The success threshold for the - star metric. This is a value that the metric must reach to be considered - successful. - "success_threshold_pct": 0 # Optional. The success threshold for the - star metric. This is a percentage value between 0 and 100. - }, - "workspace_uuid": "str" # Optional. The workspace uuid. + "agent_uuids": [ + "str" # Optional. Agent UUIDs to run the test case against (legacy + agents). + ], + "run_name": "str", # Optional. The name of the run. + "test_case_uuid": "str" # Optional. Test-case UUID to run. } # response body for status code(s): 200 response == { - "test_case_uuid": "str" # Optional. Test"u2011case UUID. + "evaluation_run_uuids": [ + "str" # Optional. 
+ ] } # response body for status code(s): 404 response == { @@ -231205,7 +241359,7 @@ async def create_evaluation_test_case( else: _json = None - _request = build_genai_create_evaluation_test_case_request( + _request = build_genai_run_evaluation_test_case_request( content_type=content_type, json=_json, content=_content, @@ -231268,23 +241422,15 @@ async def create_evaluation_test_case( return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def list_evaluation_runs_by_test_case( - self, - evaluation_test_case_uuid: str, - *, - evaluation_test_case_version: Optional[int] = None, - **kwargs: Any - ) -> JSON: + async def get_evaluation_run(self, evaluation_run_uuid: str, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """List Evaluation Runs by Test Case. + """Retrieve Information About an Existing Evaluation Run. - To list all evaluation runs by test case, send a GET request to - ``/v2/gen-ai/evaluation_test_cases/{evaluation_test_case_uuid}/evaluation_runs``. + To retrieve information about an existing evaluation run, send a GET request to + ``/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}``. - :param evaluation_test_case_uuid: Evaluation run UUID. Required. - :type evaluation_test_case_uuid: str - :keyword evaluation_test_case_version: Version of the test case. Default value is None. - :paramtype evaluation_test_case_version: int + :param evaluation_run_uuid: Evaluation run UUID. Required. + :type evaluation_run_uuid: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -231294,53 +241440,26 @@ async def list_evaluation_runs_by_test_case( # response body for status code(s): 200 response == { - "evaluation_runs": [ - { - "agent_deleted": bool, # Optional. Whether agent is deleted. - "agent_deployment_name": "str", # Optional. The agent - deployment name. - "agent_name": "str", # Optional. Agent name. - "agent_uuid": "str", # Optional. Agent UUID. - "agent_version_hash": "str", # Optional. 
Version hash. - "agent_workspace_uuid": "str", # Optional. Agent workspace - uuid. - "created_by_user_email": "str", # Optional. List of - evaluation runs. - "created_by_user_id": "str", # Optional. List of evaluation - runs. - "error_description": "str", # Optional. The error - description. - "evaluation_run_uuid": "str", # Optional. Evaluation run - UUID. - "evaluation_test_case_workspace_uuid": "str", # Optional. - Evaluation test case workspace uuid. - "finished_at": "2020-02-20 00:00:00", # Optional. Run end - time. - "pass_status": bool, # Optional. The pass status of the - evaluation run based on the star metric. - "queued_at": "2020-02-20 00:00:00", # Optional. Run queued - time. - "run_level_metric_results": [ - { - "error_description": "str", # Optional. - Error description if the metric could not be calculated. - "metric_name": "str", # Optional. Metric - name. - "metric_value_type": - "METRIC_VALUE_TYPE_UNSPECIFIED", # Optional. Default value is - "METRIC_VALUE_TYPE_UNSPECIFIED". Known values are: - "METRIC_VALUE_TYPE_UNSPECIFIED", "METRIC_VALUE_TYPE_NUMBER", - "METRIC_VALUE_TYPE_STRING", and "METRIC_VALUE_TYPE_PERCENTAGE". - "number_value": 0.0, # Optional. The value - of the metric as a number. - "reasoning": "str", # Optional. Reasoning of - the metric result. - "string_value": "str" # Optional. The value - of the metric as a string. - } - ], - "run_name": "str", # Optional. Run name. - "star_metric_result": { + "evaluation_run": { + "agent_deleted": bool, # Optional. Whether agent is deleted. + "agent_deployment_name": "str", # Optional. The agent deployment + name. + "agent_name": "str", # Optional. Agent name. + "agent_uuid": "str", # Optional. Agent UUID. + "agent_version_hash": "str", # Optional. Version hash. + "agent_workspace_uuid": "str", # Optional. Agent workspace uuid. + "created_by_user_email": "str", # Optional. + "created_by_user_id": "str", # Optional. + "error_description": "str", # Optional. The error description. 
+ "evaluation_run_uuid": "str", # Optional. Evaluation run UUID. + "evaluation_test_case_workspace_uuid": "str", # Optional. Evaluation + test case workspace uuid. + "finished_at": "2020-02-20 00:00:00", # Optional. Run end time. + "pass_status": bool, # Optional. The pass status of the evaluation + run based on the star metric. + "queued_at": "2020-02-20 00:00:00", # Optional. Run queued time. + "run_level_metric_results": [ + { "error_description": "str", # Optional. Error description if the metric could not be calculated. "metric_name": "str", # Optional. Metric name. @@ -231355,33 +241474,47 @@ async def list_evaluation_runs_by_test_case( metric result. "string_value": "str" # Optional. The value of the metric as a string. - }, - "started_at": "2020-02-20 00:00:00", # Optional. Run start - time. - "status": "EVALUATION_RUN_STATUS_UNSPECIFIED", # Optional. - Default value is "EVALUATION_RUN_STATUS_UNSPECIFIED". Evaluation Run - Statuses. Known values are: "EVALUATION_RUN_STATUS_UNSPECIFIED", - "EVALUATION_RUN_QUEUED", "EVALUATION_RUN_RUNNING_DATASET", - "EVALUATION_RUN_EVALUATING_RESULTS", "EVALUATION_RUN_CANCELLING", - "EVALUATION_RUN_CANCELLED", "EVALUATION_RUN_SUCCESSFUL", - "EVALUATION_RUN_PARTIALLY_SUCCESSFUL", and "EVALUATION_RUN_FAILED". - "test_case_description": "str", # Optional. Test case - description. - "test_case_name": "str", # Optional. Test case name. - "test_case_uuid": "str", # Optional. Test-case UUID. - "test_case_version": 0 # Optional. Test-case-version. - } - ] - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. 
Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support + } + ], + "run_name": "str", # Optional. Run name. + "star_metric_result": { + "error_description": "str", # Optional. Error description if + the metric could not be calculated. + "metric_name": "str", # Optional. Metric name. + "metric_value_type": "METRIC_VALUE_TYPE_UNSPECIFIED", # + Optional. Default value is "METRIC_VALUE_TYPE_UNSPECIFIED". Known values + are: "METRIC_VALUE_TYPE_UNSPECIFIED", "METRIC_VALUE_TYPE_NUMBER", + "METRIC_VALUE_TYPE_STRING", and "METRIC_VALUE_TYPE_PERCENTAGE". + "number_value": 0.0, # Optional. The value of the metric as + a number. + "reasoning": "str", # Optional. Reasoning of the metric + result. + "string_value": "str" # Optional. The value of the metric as + a string. + }, + "started_at": "2020-02-20 00:00:00", # Optional. Run start time. + "status": "EVALUATION_RUN_STATUS_UNSPECIFIED", # Optional. Default + value is "EVALUATION_RUN_STATUS_UNSPECIFIED". Evaluation Run Statuses. Known + values are: "EVALUATION_RUN_STATUS_UNSPECIFIED", "EVALUATION_RUN_QUEUED", + "EVALUATION_RUN_RUNNING_DATASET", "EVALUATION_RUN_EVALUATING_RESULTS", + "EVALUATION_RUN_CANCELLING", "EVALUATION_RUN_CANCELLED", + "EVALUATION_RUN_SUCCESSFUL", "EVALUATION_RUN_PARTIALLY_SUCCESSFUL", and + "EVALUATION_RUN_FAILED". + "test_case_description": "str", # Optional. Test case description. + "test_case_name": "str", # Optional. Test case name. + "test_case_uuid": "str", # Optional. Test-case UUID. + "test_case_version": 0 # Optional. Test-case-version. + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. 
+ "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support tickets to help identify the issue. } """ @@ -231403,9 +241536,8 @@ async def list_evaluation_runs_by_test_case( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_genai_list_evaluation_runs_by_test_case_request( - evaluation_test_case_uuid=evaluation_test_case_uuid, - evaluation_test_case_version=evaluation_test_case_version, + _request = build_genai_get_evaluation_run_request( + evaluation_run_uuid=evaluation_run_uuid, headers=_headers, params=_params, ) @@ -231465,23 +241597,26 @@ async def list_evaluation_runs_by_test_case( return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def get_evaluation_test_case( + async def get_evaluation_run_results( self, - test_case_uuid: str, + evaluation_run_uuid: str, *, - evaluation_test_case_version: Optional[int] = None, + page: Optional[int] = None, + per_page: Optional[int] = None, **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Retrieve Information About an Existing Evaluation Test Case. + """Retrieve Results of an Evaluation Run. - To retrive information about an existing evaluation test case, send a GET request to - ``/v2/gen-ai/evaluation_test_case/{test_case_uuid}``. + To retrieve results of an evaluation run, send a GET request to + ``/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results``. - :param test_case_uuid: The test case uuid to retrieve. Required. - :type test_case_uuid: str - :keyword evaluation_test_case_version: Version of the test case. Default value is None. - :paramtype evaluation_test_case_version: int + :param evaluation_run_uuid: Evaluation run UUID. Required. + :type evaluation_run_uuid: str + :keyword page: Page number. Default value is None. + :paramtype page: int + :keyword per_page: Items per page. Default value is None. 
+ :paramtype per_page: int :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -231491,73 +241626,346 @@ async def get_evaluation_test_case( # response body for status code(s): 200 response == { - "evaluation_test_case": { - "archived_at": "2020-02-20 00:00:00", # Optional. - "created_at": "2020-02-20 00:00:00", # Optional. + "evaluation_run": { + "agent_deleted": bool, # Optional. Whether agent is deleted. + "agent_deployment_name": "str", # Optional. The agent deployment + name. + "agent_name": "str", # Optional. Agent name. + "agent_uuid": "str", # Optional. Agent UUID. + "agent_version_hash": "str", # Optional. Version hash. + "agent_workspace_uuid": "str", # Optional. Agent workspace uuid. "created_by_user_email": "str", # Optional. "created_by_user_id": "str", # Optional. - "dataset": { - "created_at": "2020-02-20 00:00:00", # Optional. Time - created at. - "dataset_name": "str", # Optional. Name of the dataset. - "dataset_uuid": "str", # Optional. UUID of the dataset. - "file_size": "str", # Optional. The size of the dataset - uploaded file in bytes. - "has_ground_truth": bool, # Optional. Does the dataset have - a ground truth column?. - "row_count": 0 # Optional. Number of rows in the dataset. - }, - "dataset_name": "str", # Optional. - "dataset_uuid": "str", # Optional. - "description": "str", # Optional. - "latest_version_number_of_runs": 0, # Optional. - "metrics": [ + "error_description": "str", # Optional. The error description. + "evaluation_run_uuid": "str", # Optional. Evaluation run UUID. + "evaluation_test_case_workspace_uuid": "str", # Optional. Evaluation + test case workspace uuid. + "finished_at": "2020-02-20 00:00:00", # Optional. Run end time. + "pass_status": bool, # Optional. The pass status of the evaluation + run based on the star metric. + "queued_at": "2020-02-20 00:00:00", # Optional. Run queued time. + "run_level_metric_results": [ { - "category": "METRIC_CATEGORY_UNSPECIFIED", # - Optional. 
Default value is "METRIC_CATEGORY_UNSPECIFIED". Known - values are: "METRIC_CATEGORY_UNSPECIFIED", - "METRIC_CATEGORY_CORRECTNESS", "METRIC_CATEGORY_USER_OUTCOMES", - "METRIC_CATEGORY_SAFETY_AND_SECURITY", - "METRIC_CATEGORY_CONTEXT_QUALITY", and "METRIC_CATEGORY_MODEL_FIT". - "description": "str", # Optional. - "inverted": bool, # Optional. If true, the metric is - inverted, meaning that a lower value is better. - "is_metric_goal": bool, # Optional. - "metric_name": "str", # Optional. - "metric_rank": 0, # Optional. - "metric_type": "METRIC_TYPE_UNSPECIFIED", # - Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values - are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", and - "METRIC_TYPE_RAG_AND_TOOL". - "metric_uuid": "str", # Optional. + "error_description": "str", # Optional. Error + description if the metric could not be calculated. + "metric_name": "str", # Optional. Metric name. "metric_value_type": "METRIC_VALUE_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_VALUE_TYPE_UNSPECIFIED". Known values are: "METRIC_VALUE_TYPE_UNSPECIFIED", "METRIC_VALUE_TYPE_NUMBER", "METRIC_VALUE_TYPE_STRING", and "METRIC_VALUE_TYPE_PERCENTAGE". - "range_max": 0.0, # Optional. The maximum value for - the metric. - "range_min": 0.0 # Optional. The minimum value for - the metric. + "number_value": 0.0, # Optional. The value of the + metric as a number. + "reasoning": "str", # Optional. Reasoning of the + metric result. + "string_value": "str" # Optional. The value of the + metric as a string. } ], - "name": "str", # Optional. - "star_metric": { - "metric_uuid": "str", # Optional. - "name": "str", # Optional. - "success_threshold": 0.0, # Optional. The success threshold - for the star metric. This is a value that the metric must reach to be - considered successful. - "success_threshold_pct": 0 # Optional. The success threshold - for the star metric. This is a percentage value between 0 and 100. + "run_name": "str", # Optional. Run name. 
+ "star_metric_result": { + "error_description": "str", # Optional. Error description if + the metric could not be calculated. + "metric_name": "str", # Optional. Metric name. + "metric_value_type": "METRIC_VALUE_TYPE_UNSPECIFIED", # + Optional. Default value is "METRIC_VALUE_TYPE_UNSPECIFIED". Known values + are: "METRIC_VALUE_TYPE_UNSPECIFIED", "METRIC_VALUE_TYPE_NUMBER", + "METRIC_VALUE_TYPE_STRING", and "METRIC_VALUE_TYPE_PERCENTAGE". + "number_value": 0.0, # Optional. The value of the metric as + a number. + "reasoning": "str", # Optional. Reasoning of the metric + result. + "string_value": "str" # Optional. The value of the metric as + a string. }, - "test_case_uuid": "str", # Optional. - "total_runs": 0, # Optional. - "updated_at": "2020-02-20 00:00:00", # Optional. - "updated_by_user_email": "str", # Optional. - "updated_by_user_id": "str", # Optional. - "version": 0 # Optional. - } + "started_at": "2020-02-20 00:00:00", # Optional. Run start time. + "status": "EVALUATION_RUN_STATUS_UNSPECIFIED", # Optional. Default + value is "EVALUATION_RUN_STATUS_UNSPECIFIED". Evaluation Run Statuses. Known + values are: "EVALUATION_RUN_STATUS_UNSPECIFIED", "EVALUATION_RUN_QUEUED", + "EVALUATION_RUN_RUNNING_DATASET", "EVALUATION_RUN_EVALUATING_RESULTS", + "EVALUATION_RUN_CANCELLING", "EVALUATION_RUN_CANCELLED", + "EVALUATION_RUN_SUCCESSFUL", "EVALUATION_RUN_PARTIALLY_SUCCESSFUL", and + "EVALUATION_RUN_FAILED". + "test_case_description": "str", # Optional. Test case description. + "test_case_name": "str", # Optional. Test case name. + "test_case_uuid": "str", # Optional. Test-case UUID. + "test_case_version": 0 # Optional. Test-case-version. + }, + "links": { + "pages": { + "first": "str", # Optional. First page. + "last": "str", # Optional. Last page. + "next": "str", # Optional. Next page. + "previous": "str" # Optional. Previous page. + } + }, + "meta": { + "page": 0, # Optional. The current page. + "pages": 0, # Optional. Total number of pages. 
+ "total": 0 # Optional. Total amount of items over all pages. + }, + "prompts": [ + { + "evaluation_trace_spans": [ + { + "created_at": "2020-02-20 00:00:00", # + Optional. When the span was created. + "input": {}, # Optional. Input data for the + span (flexible structure - can be messages array, string, etc.). + "name": "str", # Optional. Name/identifier + for the span. + "output": {}, # Optional. Output data from + the span (flexible structure - can be message, string, etc.). + "retriever_chunks": [ + { + "chunk_usage_pct": 0.0, # + Optional. The usage percentage of the chunk. + "chunk_used": bool, # + Optional. Indicates if the chunk was used in the prompt. + "index_uuid": "str", # + Optional. The index uuid (Knowledge Base) of the chunk. + "source_name": "str", # + Optional. The source name for the chunk, e.g., the file + name or document title. + "text": "str" # Optional. + Text content of the chunk. + } + ], + "span_level_metric_results": [ + { + "error_description": "str", + # Optional. Error description if the metric could not be + calculated. + "metric_name": "str", # + Optional. Metric name. + "metric_value_type": + "METRIC_VALUE_TYPE_UNSPECIFIED", # Optional. Default + value is "METRIC_VALUE_TYPE_UNSPECIFIED". Known values + are: "METRIC_VALUE_TYPE_UNSPECIFIED", + "METRIC_VALUE_TYPE_NUMBER", "METRIC_VALUE_TYPE_STRING", + and "METRIC_VALUE_TYPE_PERCENTAGE". + "number_value": 0.0, # + Optional. The value of the metric as a number. + "reasoning": "str", # + Optional. Reasoning of the metric result. + "string_value": "str" # + Optional. The value of the metric as a string. + } + ], + "spans": [ + { + "agent": { + "agent_type": + "AGENT_TYPE_UNSPECIFIED", # Optional. Default value + is "AGENT_TYPE_UNSPECIFIED". Agent span. Known values + are: "AGENT_TYPE_UNSPECIFIED", "AGENT_TYPE_DEFAULT", + "AGENT_TYPE_PLANNER", "AGENT_TYPE_REACT", + "AGENT_TYPE_REFLECTION", "AGENT_TYPE_ROUTER", + "AGENT_TYPE_CLASSIFIER", "AGENT_TYPE_SUPERVISOR", and + "AGENT_TYPE_JUDGE". 
+ "common": { + "created_at": + "2020-02-20 00:00:00", # Optional. Common + optional fields shared by all span types. + "duration_ns": "str", # Optional. Common + optional fields shared by all span types. + "metadata": { + "str": "str" # Optional. Arbitrary + structured metadata. + }, + "status_code": 0, # Optional. Common optional + fields shared by all span types. + "tags": [ + "str" + # Optional. Free-form tags for + filtering/grouping. + ] + }, + "redacted_input": + "str", # Optional. Child spans - must contain + between 1 and 999 spans Allowed types: agent, llm, + tool, retriever (not workflow). + "redacted_output": + "str", # Optional. Child spans - must contain + between 1 and 999 spans Allowed types: agent, llm, + tool, retriever (not workflow). + "spans": [ + ... + ] + }, + "created_at": "2020-02-20 + 00:00:00", # Optional. When the span was created. + "input": {}, # Optional. + Input data for the span (flexible structure - can be + messages array, string, etc.). + "llm": { + "common": { + "created_at": + "2020-02-20 00:00:00", # Optional. Common + optional fields shared by all span types. + "duration_ns": "str", # Optional. Common + optional fields shared by all span types. + "metadata": { + "str": "str" # Optional. Arbitrary + structured metadata. + }, + "status_code": 0, # Optional. Common optional + fields shared by all span types. + "tags": [ + "str" + # Optional. Free-form tags for + filtering/grouping. + ] + }, + "model": "str", # + Optional. LLM span. + "num_input_tokens": + 0, # Optional. LLM span. + "num_output_tokens": + 0, # Optional. LLM span. + "temperature": 0.0, + # Optional. LLM span. + "time_to_first_token_ns": "str", # Optional. LLM + span. + "tools": [ + {} # + Optional. Tool definitions passed to the model. + ], + "total_tokens": 0 # + Optional. LLM span. + }, + "name": "str", # Optional. + Name/identifier for the span. + "output": {}, # Optional. + Output data from the span (flexible structure - can be + message, string, etc.). 
+ "retriever": { + "common": { + "created_at": + "2020-02-20 00:00:00", # Optional. Common + optional fields shared by all span types. + "duration_ns": "str", # Optional. Common + optional fields shared by all span types. + "metadata": { + "str": "str" # Optional. Arbitrary + structured metadata. + }, + "status_code": 0, # Optional. Common optional + fields shared by all span types. + "tags": [ + "str" + # Optional. Free-form tags for + filtering/grouping. + ] + } + }, + "tool": { + "common": { + "created_at": + "2020-02-20 00:00:00", # Optional. Common + optional fields shared by all span types. + "duration_ns": "str", # Optional. Common + optional fields shared by all span types. + "metadata": { + "str": "str" # Optional. Arbitrary + structured metadata. + }, + "status_code": 0, # Optional. Common optional + fields shared by all span types. + "tags": [ + "str" + # Optional. Free-form tags for + filtering/grouping. + ] + }, + "tool_call_id": "str" + # Optional. Tool span. + }, + "type": + "TRACE_SPAN_TYPE_UNKNOWN", # Optional. Default value is + "TRACE_SPAN_TYPE_UNKNOWN". Types of spans in a trace. + Known values are: "TRACE_SPAN_TYPE_UNKNOWN", + "TRACE_SPAN_TYPE_LLM", "TRACE_SPAN_TYPE_RETRIEVER", + "TRACE_SPAN_TYPE_TOOL", "TRACE_SPAN_TYPE_AGENT", and + "TRACE_SPAN_TYPE_WORKFLOW". + "workflow": { + "common": { + "created_at": + "2020-02-20 00:00:00", # Optional. Common + optional fields shared by all span types. + "duration_ns": "str", # Optional. Common + optional fields shared by all span types. + "metadata": { + "str": "str" # Optional. Arbitrary + structured metadata. + }, + "status_code": 0, # Optional. Common optional + fields shared by all span types. + "tags": [ + "str" + # Optional. Free-form tags for + filtering/grouping. + ] + }, + "spans": [ + ... + ] + } + } + ], + "type": "TRACE_SPAN_TYPE_UNKNOWN" # + Optional. Default value is "TRACE_SPAN_TYPE_UNKNOWN". Types of + spans in a trace. 
Known values are: "TRACE_SPAN_TYPE_UNKNOWN", + "TRACE_SPAN_TYPE_LLM", "TRACE_SPAN_TYPE_RETRIEVER", + "TRACE_SPAN_TYPE_TOOL", "TRACE_SPAN_TYPE_AGENT", and + "TRACE_SPAN_TYPE_WORKFLOW". + } + ], + "ground_truth": "str", # Optional. The ground truth for the + prompt. + "input": "str", # Optional. The prompt level results. + "input_tokens": "str", # Optional. The number of input + tokens used in the prompt. + "output": "str", # Optional. The prompt level results. + "output_tokens": "str", # Optional. The number of output + tokens used in the prompt. + "prompt_chunks": [ + { + "chunk_usage_pct": 0.0, # Optional. The + usage percentage of the chunk. + "chunk_used": bool, # Optional. Indicates if + the chunk was used in the prompt. + "index_uuid": "str", # Optional. The index + uuid (Knowledge Base) of the chunk. + "source_name": "str", # Optional. The source + name for the chunk, e.g., the file name or document title. + "text": "str" # Optional. Text content of + the chunk. + } + ], + "prompt_id": 0, # Optional. Prompt ID. + "prompt_level_metric_results": [ + { + "error_description": "str", # Optional. + Error description if the metric could not be calculated. + "metric_name": "str", # Optional. Metric + name. + "metric_value_type": + "METRIC_VALUE_TYPE_UNSPECIFIED", # Optional. Default value is + "METRIC_VALUE_TYPE_UNSPECIFIED". Known values are: + "METRIC_VALUE_TYPE_UNSPECIFIED", "METRIC_VALUE_TYPE_NUMBER", + "METRIC_VALUE_TYPE_STRING", and "METRIC_VALUE_TYPE_PERCENTAGE". + "number_value": 0.0, # Optional. The value + of the metric as a number. + "reasoning": "str", # Optional. Reasoning of + the metric result. + "string_value": "str" # Optional. The value + of the metric as a string. + } + ], + "trace_id": "str" # Optional. The trace id for the prompt. 
+ } + ] } # response body for status code(s): 404 response == { @@ -231589,9 +241997,10 @@ async def get_evaluation_test_case( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_genai_get_evaluation_test_case_request( - test_case_uuid=test_case_uuid, - evaluation_test_case_version=evaluation_test_case_version, + _request = build_genai_get_evaluation_run_results_request( + evaluation_run_uuid=evaluation_run_uuid, + page=page, + per_page=per_page, headers=_headers, params=_params, ) @@ -231650,140 +242059,20 @@ async def get_evaluation_test_case( return cast(JSON, deserialized) # type: ignore - @overload - async def update_evaluation_test_case( - self, - test_case_uuid: str, - body: Optional[JSON] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> JSON: - # pylint: disable=line-too-long - """Update an Evaluation Test Case. - - To update an evaluation test-case send a PUT request to - ``/v2/gen-ai/evaluation_test_cases/{test_case_uuid}``. - - :param test_case_uuid: Test-case UUID to update. Required. - :type test_case_uuid: str - :param body: Default value is None. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "dataset_uuid": "str", # Optional. Dataset against which the test"u2011case - is executed. - "description": "str", # Optional. Description of the test case. - "metrics": { - "metric_uuids": [ - "str" # Optional. - ] - }, - "name": "str", # Optional. Name of the test case. - "star_metric": { - "metric_uuid": "str", # Optional. - "name": "str", # Optional. - "success_threshold": 0.0, # Optional. The success threshold for the - star metric. 
This is a value that the metric must reach to be considered - successful. - "success_threshold_pct": 0 # Optional. The success threshold for the - star metric. This is a percentage value between 0 and 100. - }, - "test_case_uuid": "str" # Optional. Test-case UUID to update. - } - - # response body for status code(s): 200 - response == { - "test_case_uuid": "str", # Optional. - "version": 0 # Optional. The new verson of the test case. - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @overload - async def update_evaluation_test_case( - self, - test_case_uuid: str, - body: Optional[IO[bytes]] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> JSON: - # pylint: disable=line-too-long - """Update an Evaluation Test Case. - - To update an evaluation test-case send a PUT request to - ``/v2/gen-ai/evaluation_test_cases/{test_case_uuid}``. - - :param test_case_uuid: Test-case UUID to update. Required. - :type test_case_uuid: str - :param body: Default value is None. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "test_case_uuid": "str", # Optional. - "version": 0 # Optional. 
The new verson of the test case. - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - @distributed_trace_async - async def update_evaluation_test_case( - self, - test_case_uuid: str, - body: Optional[Union[JSON, IO[bytes]]] = None, - **kwargs: Any + async def get_evaluation_run_prompt_results( + self, evaluation_run_uuid: str, prompt_id: int, **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Update an Evaluation Test Case. + """Retrieve Results of an Evaluation Run Prompt. - To update an evaluation test-case send a PUT request to - ``/v2/gen-ai/evaluation_test_cases/{test_case_uuid}``. + To retrieve results of an evaluation run, send a GET request to + ``/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}``. - :param test_case_uuid: Test-case UUID to update. Required. - :type test_case_uuid: str - :param body: Is either a JSON type or a IO[bytes] type. Default value is None. - :type body: JSON or IO[bytes] + :param evaluation_run_uuid: Evaluation run UUID. Required. + :type evaluation_run_uuid: str + :param prompt_id: Prompt ID to get results for. Required. + :type prompt_id: int :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -231791,33 +242080,269 @@ async def update_evaluation_test_case( Example: .. code-block:: python - # JSON input template you can fill out and use as your body input. - body = { - "dataset_uuid": "str", # Optional. 
Dataset against which the test"u2011case - is executed. - "description": "str", # Optional. Description of the test case. - "metrics": { - "metric_uuids": [ - "str" # Optional. - ] - }, - "name": "str", # Optional. Name of the test case. - "star_metric": { - "metric_uuid": "str", # Optional. - "name": "str", # Optional. - "success_threshold": 0.0, # Optional. The success threshold for the - star metric. This is a value that the metric must reach to be considered - successful. - "success_threshold_pct": 0 # Optional. The success threshold for the - star metric. This is a percentage value between 0 and 100. - }, - "test_case_uuid": "str" # Optional. Test-case UUID to update. - } - # response body for status code(s): 200 response == { - "test_case_uuid": "str", # Optional. - "version": 0 # Optional. The new verson of the test case. + "prompt": { + "evaluation_trace_spans": [ + { + "created_at": "2020-02-20 00:00:00", # Optional. + When the span was created. + "input": {}, # Optional. Input data for the span + (flexible structure - can be messages array, string, etc.). + "name": "str", # Optional. Name/identifier for the + span. + "output": {}, # Optional. Output data from the span + (flexible structure - can be message, string, etc.). + "retriever_chunks": [ + { + "chunk_usage_pct": 0.0, # Optional. + The usage percentage of the chunk. + "chunk_used": bool, # Optional. + Indicates if the chunk was used in the prompt. + "index_uuid": "str", # Optional. The + index uuid (Knowledge Base) of the chunk. + "source_name": "str", # Optional. + The source name for the chunk, e.g., the file name or + document title. + "text": "str" # Optional. Text + content of the chunk. + } + ], + "span_level_metric_results": [ + { + "error_description": "str", # + Optional. Error description if the metric could not be + calculated. + "metric_name": "str", # Optional. + Metric name. + "metric_value_type": + "METRIC_VALUE_TYPE_UNSPECIFIED", # Optional. 
Default value + is "METRIC_VALUE_TYPE_UNSPECIFIED". Known values are: + "METRIC_VALUE_TYPE_UNSPECIFIED", "METRIC_VALUE_TYPE_NUMBER", + "METRIC_VALUE_TYPE_STRING", and + "METRIC_VALUE_TYPE_PERCENTAGE". + "number_value": 0.0, # Optional. The + value of the metric as a number. + "reasoning": "str", # Optional. + Reasoning of the metric result. + "string_value": "str" # Optional. + The value of the metric as a string. + } + ], + "spans": [ + { + "agent": { + "agent_type": + "AGENT_TYPE_UNSPECIFIED", # Optional. Default value is + "AGENT_TYPE_UNSPECIFIED". Agent span. Known values are: + "AGENT_TYPE_UNSPECIFIED", "AGENT_TYPE_DEFAULT", + "AGENT_TYPE_PLANNER", "AGENT_TYPE_REACT", + "AGENT_TYPE_REFLECTION", "AGENT_TYPE_ROUTER", + "AGENT_TYPE_CLASSIFIER", "AGENT_TYPE_SUPERVISOR", and + "AGENT_TYPE_JUDGE". + "common": { + "created_at": + "2020-02-20 00:00:00", # Optional. Common optional + fields shared by all span types. + "duration_ns": "str", + # Optional. Common optional fields shared by all span + types. + "metadata": { + "str": "str" + # Optional. Arbitrary structured metadata. + }, + "status_code": 0, # + Optional. Common optional fields shared by all span + types. + "tags": [ + "str" # + Optional. Free-form tags for filtering/grouping. + ] + }, + "redacted_input": "str", # + Optional. Child spans - must contain between 1 and 999 + spans Allowed types: agent, llm, tool, retriever (not + workflow). + "redacted_output": "str", # + Optional. Child spans - must contain between 1 and 999 + spans Allowed types: agent, llm, tool, retriever (not + workflow). + "spans": [ + ... + ] + }, + "created_at": "2020-02-20 00:00:00", + # Optional. When the span was created. + "input": {}, # Optional. Input data + for the span (flexible structure - can be messages array, + string, etc.). + "llm": { + "common": { + "created_at": + "2020-02-20 00:00:00", # Optional. Common optional + fields shared by all span types. + "duration_ns": "str", + # Optional. 
Common optional fields shared by all span + types. + "metadata": { + "str": "str" + # Optional. Arbitrary structured metadata. + }, + "status_code": 0, # + Optional. Common optional fields shared by all span + types. + "tags": [ + "str" # + Optional. Free-form tags for filtering/grouping. + ] + }, + "model": "str", # Optional. + LLM span. + "num_input_tokens": 0, # + Optional. LLM span. + "num_output_tokens": 0, # + Optional. LLM span. + "temperature": 0.0, # + Optional. LLM span. + "time_to_first_token_ns": + "str", # Optional. LLM span. + "tools": [ + {} # Optional. Tool + definitions passed to the model. + ], + "total_tokens": 0 # + Optional. LLM span. + }, + "name": "str", # Optional. + Name/identifier for the span. + "output": {}, # Optional. Output + data from the span (flexible structure - can be message, + string, etc.). + "retriever": { + "common": { + "created_at": + "2020-02-20 00:00:00", # Optional. Common optional + fields shared by all span types. + "duration_ns": "str", + # Optional. Common optional fields shared by all span + types. + "metadata": { + "str": "str" + # Optional. Arbitrary structured metadata. + }, + "status_code": 0, # + Optional. Common optional fields shared by all span + types. + "tags": [ + "str" # + Optional. Free-form tags for filtering/grouping. + ] + } + }, + "tool": { + "common": { + "created_at": + "2020-02-20 00:00:00", # Optional. Common optional + fields shared by all span types. + "duration_ns": "str", + # Optional. Common optional fields shared by all span + types. + "metadata": { + "str": "str" + # Optional. Arbitrary structured metadata. + }, + "status_code": 0, # + Optional. Common optional fields shared by all span + types. + "tags": [ + "str" # + Optional. Free-form tags for filtering/grouping. + ] + }, + "tool_call_id": "str" # + Optional. Tool span. + }, + "type": "TRACE_SPAN_TYPE_UNKNOWN", # + Optional. Default value is "TRACE_SPAN_TYPE_UNKNOWN". Types + of spans in a trace. 
Known values are: + "TRACE_SPAN_TYPE_UNKNOWN", "TRACE_SPAN_TYPE_LLM", + "TRACE_SPAN_TYPE_RETRIEVER", "TRACE_SPAN_TYPE_TOOL", + "TRACE_SPAN_TYPE_AGENT", and "TRACE_SPAN_TYPE_WORKFLOW". + "workflow": { + "common": { + "created_at": + "2020-02-20 00:00:00", # Optional. Common optional + fields shared by all span types. + "duration_ns": "str", + # Optional. Common optional fields shared by all span + types. + "metadata": { + "str": "str" + # Optional. Arbitrary structured metadata. + }, + "status_code": 0, # + Optional. Common optional fields shared by all span + types. + "tags": [ + "str" # + Optional. Free-form tags for filtering/grouping. + ] + }, + "spans": [ + ... + ] + } + } + ], + "type": "TRACE_SPAN_TYPE_UNKNOWN" # Optional. + Default value is "TRACE_SPAN_TYPE_UNKNOWN". Types of spans in a + trace. Known values are: "TRACE_SPAN_TYPE_UNKNOWN", + "TRACE_SPAN_TYPE_LLM", "TRACE_SPAN_TYPE_RETRIEVER", + "TRACE_SPAN_TYPE_TOOL", "TRACE_SPAN_TYPE_AGENT", and + "TRACE_SPAN_TYPE_WORKFLOW". + } + ], + "ground_truth": "str", # Optional. The ground truth for the prompt. + "input": "str", # Optional. + "input_tokens": "str", # Optional. The number of input tokens used + in the prompt. + "output": "str", # Optional. + "output_tokens": "str", # Optional. The number of output tokens used + in the prompt. + "prompt_chunks": [ + { + "chunk_usage_pct": 0.0, # Optional. The usage + percentage of the chunk. + "chunk_used": bool, # Optional. Indicates if the + chunk was used in the prompt. + "index_uuid": "str", # Optional. The index uuid + (Knowledge Base) of the chunk. + "source_name": "str", # Optional. The source name + for the chunk, e.g., the file name or document title. + "text": "str" # Optional. Text content of the chunk. + } + ], + "prompt_id": 0, # Optional. Prompt ID. + "prompt_level_metric_results": [ + { + "error_description": "str", # Optional. Error + description if the metric could not be calculated. + "metric_name": "str", # Optional. Metric name. 
+ "metric_value_type": "METRIC_VALUE_TYPE_UNSPECIFIED", + # Optional. Default value is "METRIC_VALUE_TYPE_UNSPECIFIED". Known + values are: "METRIC_VALUE_TYPE_UNSPECIFIED", + "METRIC_VALUE_TYPE_NUMBER", "METRIC_VALUE_TYPE_STRING", and + "METRIC_VALUE_TYPE_PERCENTAGE". + "number_value": 0.0, # Optional. The value of the + metric as a number. + "reasoning": "str", # Optional. Reasoning of the + metric result. + "string_value": "str" # Optional. The value of the + metric as a string. + } + ], + "trace_id": "str" # Optional. The trace id for the prompt. + } } # response body for status code(s): 404 response == { @@ -231844,30 +242369,14 @@ async def update_evaluation_test_case( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) cls: ClsType[JSON] = kwargs.pop("cls", None) - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - if body is not None: - _json = body - else: - _json = None - - _request = build_genai_update_evaluation_test_case_request( - test_case_uuid=test_case_uuid, - content_type=content_type, - json=_json, - content=_content, + _request = build_genai_get_evaluation_run_prompt_results_request( + evaluation_run_uuid=evaluation_run_uuid, + prompt_id=prompt_id, headers=_headers, params=_params, ) @@ -231927,23 +242436,12 @@ async def update_evaluation_test_case( return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def list_indexing_jobs( - self, - *, - page: Optional[int] = None, - per_page: Optional[int] = None, - **kwargs: Any - ) -> JSON: + async def list_evaluation_test_cases(self, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """List Indexing Jobs for a Knowledge 
Base. + """List Evaluation Test Cases. - To list all indexing jobs for a knowledge base, send a GET request to - ``/v2/gen-ai/indexing_jobs``. + To list all evaluation test cases, send a GET request to ``/v2/gen-ai/evaluation_test_cases``. - :keyword page: Page number. Default value is None. - :paramtype page: int - :keyword per_page: Items per page. Default value is None. - :paramtype per_page: int :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -231953,97 +242451,136 @@ async def list_indexing_jobs( # response body for status code(s): 200 response == { - "jobs": [ + "evaluation_test_cases": [ { - "completed_datasources": 0, # Optional. Number of - datasources indexed completed. - "created_at": "2020-02-20 00:00:00", # Optional. Creation - date / time. - "data_source_jobs": [ + "archived_at": "2020-02-20 00:00:00", # Optional. + Alternative way of authentication for internal usage only - should not be + exposed to public api. + "created_at": "2020-02-20 00:00:00", # Optional. Alternative + way of authentication for internal usage only - should not be exposed to + public api. + "created_by_user_email": "str", # Optional. Alternative way + of authentication for internal usage only - should not be exposed to + public api. + "created_by_user_id": "str", # Optional. Alternative way of + authentication for internal usage only - should not be exposed to public + api. + "dataset": { + "created_at": "2020-02-20 00:00:00", # Optional. + Time created at. + "dataset_name": "str", # Optional. Name of the + dataset. + "dataset_type": "EVALUATION_DATASET_TYPE_UNKNOWN", # + Optional. Default value is "EVALUATION_DATASET_TYPE_UNKNOWN". Known + values are: "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". + "dataset_uuid": "str", # Optional. UUID of the + dataset. + "file_size": "str", # Optional. The size of the + dataset uploaded file in bytes. 
+ "has_ground_truth": bool, # Optional. Does the + dataset have a ground truth column?. + "row_count": 0 # Optional. Number of rows in the + dataset. + }, + "dataset_name": "str", # Optional. Alternative way of + authentication for internal usage only - should not be exposed to public + api. + "dataset_uuid": "str", # Optional. Alternative way of + authentication for internal usage only - should not be exposed to public + api. + "description": "str", # Optional. Alternative way of + authentication for internal usage only - should not be exposed to public + api. + "latest_version_number_of_runs": 0, # Optional. Alternative + way of authentication for internal usage only - should not be exposed to + public api. + "metrics": [ { - "completed_at": "2020-02-20 00:00:00", # - Optional. Timestamp when data source completed indexing. - "data_source_uuid": "str", # Optional. Uuid - of the indexed data source. - "error_details": "str", # Optional. A - detailed error description. - "error_msg": "str", # Optional. A string - code provinding a hint which part of the system experienced an - error. - "failed_item_count": "str", # Optional. - Total count of files that have failed. - "indexed_file_count": "str", # Optional. - Total count of files that have been indexed. - "indexed_item_count": "str", # Optional. - Total count of files that have been indexed. - "removed_item_count": "str", # Optional. - Total count of files that have been removed. - "skipped_item_count": "str", # Optional. - Total count of files that have been skipped. - "started_at": "2020-02-20 00:00:00", # - Optional. Timestamp when data source started indexing. - "status": "DATA_SOURCE_STATUS_UNKNOWN", # - Optional. Default value is "DATA_SOURCE_STATUS_UNKNOWN". 
Known - values are: "DATA_SOURCE_STATUS_UNKNOWN", - "DATA_SOURCE_STATUS_IN_PROGRESS", "DATA_SOURCE_STATUS_UPDATED", - "DATA_SOURCE_STATUS_PARTIALLY_UPDATED", - "DATA_SOURCE_STATUS_NOT_UPDATED", "DATA_SOURCE_STATUS_FAILED", - and "DATA_SOURCE_STATUS_CANCELLED". - "total_bytes": "str", # Optional. Total size - of files in data source in bytes. - "total_bytes_indexed": "str", # Optional. - Total size of files in data source in bytes that have been - indexed. - "total_file_count": "str" # Optional. Total - file count in the data source. + "category": "METRIC_CATEGORY_UNSPECIFIED", # + Optional. Default value is "METRIC_CATEGORY_UNSPECIFIED". Known + values are: "METRIC_CATEGORY_UNSPECIFIED", + "METRIC_CATEGORY_CORRECTNESS", "METRIC_CATEGORY_USER_OUTCOMES", + "METRIC_CATEGORY_SAFETY_AND_SECURITY", + "METRIC_CATEGORY_CONTEXT_QUALITY", and + "METRIC_CATEGORY_MODEL_FIT". + "description": "str", # Optional. + Alternative way of authentication for internal usage only - + should not be exposed to public api. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default value is + "EVALUATION_SCOPE_UNSPECIFIED". Scope that determines whether a + metric belongs to agent evaluation or model evaluation. For + backwards compatibility, UNSPECIFIED defaults to agent metrics + only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". + "inverted": bool, # Optional. If true, the + metric is inverted, meaning that a lower value is better. + "is_metric_goal": bool, # Optional. + Alternative way of authentication for internal usage only - + should not be exposed to public api. + "metric_name": "str", # Optional. + Alternative way of authentication for internal usage only - + should not be exposed to public api. + "metric_rank": 0, # Optional. Alternative + way of authentication for internal usage only - should not be + exposed to public api. + "metric_type": "METRIC_TYPE_UNSPECIFIED", # + Optional. 
Default value is "METRIC_TYPE_UNSPECIFIED". Known + values are: "METRIC_TYPE_UNSPECIFIED", + "METRIC_TYPE_GENERAL_QUALITY", "METRIC_TYPE_RAG_AND_TOOL", + "METRIC_TYPE_MODEL_QUALITY", and "METRIC_TYPE_MODEL_SAFETY". + "metric_uuid": "str", # Optional. + Alternative way of authentication for internal usage only - + should not be exposed to public api. + "metric_value_type": + "METRIC_VALUE_TYPE_UNSPECIFIED", # Optional. Default value is + "METRIC_VALUE_TYPE_UNSPECIFIED". Known values are: + "METRIC_VALUE_TYPE_UNSPECIFIED", "METRIC_VALUE_TYPE_NUMBER", + "METRIC_VALUE_TYPE_STRING", and "METRIC_VALUE_TYPE_PERCENTAGE". + "range_max": 0.0, # Optional. The maximum + value for the metric. + "range_min": 0.0 # Optional. The minimum + value for the metric. } ], - "data_source_uuids": [ - "str" # Optional. The indexing jobs. - ], - "finished_at": "2020-02-20 00:00:00", # Optional. The - indexing jobs. - "is_report_available": bool, # Optional. Boolean value to - determine if the indexing job details are available. - "knowledge_base_uuid": "str", # Optional. Knowledge base id. - "phase": "BATCH_JOB_PHASE_UNKNOWN", # Optional. Default - value is "BATCH_JOB_PHASE_UNKNOWN". Known values are: - "BATCH_JOB_PHASE_UNKNOWN", "BATCH_JOB_PHASE_PENDING", - "BATCH_JOB_PHASE_RUNNING", "BATCH_JOB_PHASE_SUCCEEDED", - "BATCH_JOB_PHASE_FAILED", "BATCH_JOB_PHASE_ERROR", and - "BATCH_JOB_PHASE_CANCELLED". - "started_at": "2020-02-20 00:00:00", # Optional. The - indexing jobs. - "status": "INDEX_JOB_STATUS_UNKNOWN", # Optional. Default - value is "INDEX_JOB_STATUS_UNKNOWN". Known values are: - "INDEX_JOB_STATUS_UNKNOWN", "INDEX_JOB_STATUS_PARTIAL", - "INDEX_JOB_STATUS_IN_PROGRESS", "INDEX_JOB_STATUS_COMPLETED", - "INDEX_JOB_STATUS_FAILED", "INDEX_JOB_STATUS_NO_CHANGES", - "INDEX_JOB_STATUS_PENDING", and "INDEX_JOB_STATUS_CANCELLED". - "tokens": 0, # Optional. Number of tokens [This field is - deprecated]. - "total_datasources": 0, # Optional. Number of datasources - being indexed. 
- "total_tokens": "str", # Optional. Total Tokens Consumed By - the Indexing Job. - "updated_at": "2020-02-20 00:00:00", # Optional. Last - modified. - "uuid": "str" # Optional. Unique id. - } - ], - "links": { - "pages": { - "first": "str", # Optional. First page. - "last": "str", # Optional. Last page. - "next": "str", # Optional. Next page. - "previous": "str" # Optional. Previous page. + "name": "str", # Optional. Alternative way of authentication + for internal usage only - should not be exposed to public api. + "star_metric": { + "metric_uuid": "str", # Optional. Alternative way of + authentication for internal usage only - should not be exposed to + public api. + "name": "str", # Optional. Alternative way of + authentication for internal usage only - should not be exposed to + public api. + "success_threshold": 0.0, # Optional. The success + threshold for the star metric. This is a value that the metric must + reach to be considered successful. + "success_threshold_pct": 0 # Optional. The success + threshold for the star metric. This is a percentage value between 0 + and 100. + }, + "test_case_uuid": "str", # Optional. Alternative way of + authentication for internal usage only - should not be exposed to public + api. + "total_runs": 0, # Optional. Alternative way of + authentication for internal usage only - should not be exposed to public + api. + "updated_at": "2020-02-20 00:00:00", # Optional. Alternative + way of authentication for internal usage only - should not be exposed to + public api. + "updated_by_user_email": "str", # Optional. Alternative way + of authentication for internal usage only - should not be exposed to + public api. + "updated_by_user_id": "str", # Optional. Alternative way of + authentication for internal usage only - should not be exposed to public + api. + "version": 0 # Optional. Alternative way of authentication + for internal usage only - should not be exposed to public api. } - }, - "meta": { - "page": 0, # Optional. 
The current page. - "pages": 0, # Optional. Total number of pages. - "total": 0 # Optional. Total amount of items over all pages. - } + ] } # response body for status code(s): 404 response == { @@ -232075,9 +242612,7 @@ async def list_indexing_jobs( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_genai_list_indexing_jobs_request( - page=page, - per_page=per_page, + _request = build_genai_list_evaluation_test_cases_request( headers=_headers, params=_params, ) @@ -232137,7 +242672,7 @@ async def list_indexing_jobs( return cast(JSON, deserialized) # type: ignore @overload - async def create_indexing_job( + async def create_evaluation_test_case( self, body: Optional[JSON] = None, *, @@ -232145,10 +242680,9 @@ async def create_indexing_job( **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Start Indexing Job for a Knowledge Base. + """Create Evaluation Test Case. - To start an indexing job for a knowledge base, send a POST request to - ``/v2/gen-ai/indexing_jobs``. + To create an evaluation test-case send a POST request to ``/v2/gen-ai/evaluation_test_cases``. :param body: Default value is None. :type body: JSON @@ -232164,86 +242698,29 @@ async def create_indexing_job( # JSON input template you can fill out and use as your body input. body = { - "data_source_uuids": [ - "str" # Optional. List of data source ids to index, if none are - provided, all data sources will be indexed. + "agent_workspace_name": "str", # Optional. + "dataset_uuid": "str", # Optional. Dataset against which the test"u2011case + is executed. + "description": "str", # Optional. Description of the test case. + "metrics": [ + "str" # Optional. Full metric list to use for evaluation test case. ], - "knowledge_base_uuid": "str" # Optional. Knowledge base id. + "name": "str", # Optional. Name of the test case. + "star_metric": { + "metric_uuid": "str", # Optional. + "name": "str", # Optional. + "success_threshold": 0.0, # Optional. The success threshold for the + star metric. 
This is a value that the metric must reach to be considered + successful. + "success_threshold_pct": 0 # Optional. The success threshold for the + star metric. This is a percentage value between 0 and 100. + }, + "workspace_uuid": "str" # Optional. The workspace uuid. } # response body for status code(s): 200 response == { - "job": { - "completed_datasources": 0, # Optional. Number of datasources - indexed completed. - "created_at": "2020-02-20 00:00:00", # Optional. Creation date / - time. - "data_source_jobs": [ - { - "completed_at": "2020-02-20 00:00:00", # Optional. - Timestamp when data source completed indexing. - "data_source_uuid": "str", # Optional. Uuid of the - indexed data source. - "error_details": "str", # Optional. A detailed error - description. - "error_msg": "str", # Optional. A string code - provinding a hint which part of the system experienced an error. - "failed_item_count": "str", # Optional. Total count - of files that have failed. - "indexed_file_count": "str", # Optional. Total count - of files that have been indexed. - "indexed_item_count": "str", # Optional. Total count - of files that have been indexed. - "removed_item_count": "str", # Optional. Total count - of files that have been removed. - "skipped_item_count": "str", # Optional. Total count - of files that have been skipped. - "started_at": "2020-02-20 00:00:00", # Optional. - Timestamp when data source started indexing. - "status": "DATA_SOURCE_STATUS_UNKNOWN", # Optional. - Default value is "DATA_SOURCE_STATUS_UNKNOWN". Known values are: - "DATA_SOURCE_STATUS_UNKNOWN", "DATA_SOURCE_STATUS_IN_PROGRESS", - "DATA_SOURCE_STATUS_UPDATED", "DATA_SOURCE_STATUS_PARTIALLY_UPDATED", - "DATA_SOURCE_STATUS_NOT_UPDATED", "DATA_SOURCE_STATUS_FAILED", and - "DATA_SOURCE_STATUS_CANCELLED". - "total_bytes": "str", # Optional. Total size of - files in data source in bytes. - "total_bytes_indexed": "str", # Optional. Total size - of files in data source in bytes that have been indexed. 
- "total_file_count": "str" # Optional. Total file - count in the data source. - } - ], - "data_source_uuids": [ - "str" # Optional. IndexingJob description. - ], - "finished_at": "2020-02-20 00:00:00", # Optional. IndexingJob - description. - "is_report_available": bool, # Optional. Boolean value to determine - if the indexing job details are available. - "knowledge_base_uuid": "str", # Optional. Knowledge base id. - "phase": "BATCH_JOB_PHASE_UNKNOWN", # Optional. Default value is - "BATCH_JOB_PHASE_UNKNOWN". Known values are: "BATCH_JOB_PHASE_UNKNOWN", - "BATCH_JOB_PHASE_PENDING", "BATCH_JOB_PHASE_RUNNING", - "BATCH_JOB_PHASE_SUCCEEDED", "BATCH_JOB_PHASE_FAILED", - "BATCH_JOB_PHASE_ERROR", and "BATCH_JOB_PHASE_CANCELLED". - "started_at": "2020-02-20 00:00:00", # Optional. IndexingJob - description. - "status": "INDEX_JOB_STATUS_UNKNOWN", # Optional. Default value is - "INDEX_JOB_STATUS_UNKNOWN". Known values are: "INDEX_JOB_STATUS_UNKNOWN", - "INDEX_JOB_STATUS_PARTIAL", "INDEX_JOB_STATUS_IN_PROGRESS", - "INDEX_JOB_STATUS_COMPLETED", "INDEX_JOB_STATUS_FAILED", - "INDEX_JOB_STATUS_NO_CHANGES", "INDEX_JOB_STATUS_PENDING", and - "INDEX_JOB_STATUS_CANCELLED". - "tokens": 0, # Optional. Number of tokens [This field is - deprecated]. - "total_datasources": 0, # Optional. Number of datasources being - indexed. - "total_tokens": "str", # Optional. Total Tokens Consumed By the - Indexing Job. - "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. - "uuid": "str" # Optional. Unique id. - } + "test_case_uuid": "str" # Optional. Test"u2011case UUID. } # response body for status code(s): 404 response == { @@ -232259,7 +242736,7 @@ async def create_indexing_job( """ @overload - async def create_indexing_job( + async def create_evaluation_test_case( self, body: Optional[IO[bytes]] = None, *, @@ -232267,10 +242744,9 @@ async def create_indexing_job( **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Start Indexing Job for a Knowledge Base. 
+ """Create Evaluation Test Case. - To start an indexing job for a knowledge base, send a POST request to - ``/v2/gen-ai/indexing_jobs``. + To create an evaluation test-case send a POST request to ``/v2/gen-ai/evaluation_test_cases``. :param body: Default value is None. :type body: IO[bytes] @@ -232286,77 +242762,7 @@ async def create_indexing_job( # response body for status code(s): 200 response == { - "job": { - "completed_datasources": 0, # Optional. Number of datasources - indexed completed. - "created_at": "2020-02-20 00:00:00", # Optional. Creation date / - time. - "data_source_jobs": [ - { - "completed_at": "2020-02-20 00:00:00", # Optional. - Timestamp when data source completed indexing. - "data_source_uuid": "str", # Optional. Uuid of the - indexed data source. - "error_details": "str", # Optional. A detailed error - description. - "error_msg": "str", # Optional. A string code - provinding a hint which part of the system experienced an error. - "failed_item_count": "str", # Optional. Total count - of files that have failed. - "indexed_file_count": "str", # Optional. Total count - of files that have been indexed. - "indexed_item_count": "str", # Optional. Total count - of files that have been indexed. - "removed_item_count": "str", # Optional. Total count - of files that have been removed. - "skipped_item_count": "str", # Optional. Total count - of files that have been skipped. - "started_at": "2020-02-20 00:00:00", # Optional. - Timestamp when data source started indexing. - "status": "DATA_SOURCE_STATUS_UNKNOWN", # Optional. - Default value is "DATA_SOURCE_STATUS_UNKNOWN". Known values are: - "DATA_SOURCE_STATUS_UNKNOWN", "DATA_SOURCE_STATUS_IN_PROGRESS", - "DATA_SOURCE_STATUS_UPDATED", "DATA_SOURCE_STATUS_PARTIALLY_UPDATED", - "DATA_SOURCE_STATUS_NOT_UPDATED", "DATA_SOURCE_STATUS_FAILED", and - "DATA_SOURCE_STATUS_CANCELLED". - "total_bytes": "str", # Optional. Total size of - files in data source in bytes. - "total_bytes_indexed": "str", # Optional. 
Total size - of files in data source in bytes that have been indexed. - "total_file_count": "str" # Optional. Total file - count in the data source. - } - ], - "data_source_uuids": [ - "str" # Optional. IndexingJob description. - ], - "finished_at": "2020-02-20 00:00:00", # Optional. IndexingJob - description. - "is_report_available": bool, # Optional. Boolean value to determine - if the indexing job details are available. - "knowledge_base_uuid": "str", # Optional. Knowledge base id. - "phase": "BATCH_JOB_PHASE_UNKNOWN", # Optional. Default value is - "BATCH_JOB_PHASE_UNKNOWN". Known values are: "BATCH_JOB_PHASE_UNKNOWN", - "BATCH_JOB_PHASE_PENDING", "BATCH_JOB_PHASE_RUNNING", - "BATCH_JOB_PHASE_SUCCEEDED", "BATCH_JOB_PHASE_FAILED", - "BATCH_JOB_PHASE_ERROR", and "BATCH_JOB_PHASE_CANCELLED". - "started_at": "2020-02-20 00:00:00", # Optional. IndexingJob - description. - "status": "INDEX_JOB_STATUS_UNKNOWN", # Optional. Default value is - "INDEX_JOB_STATUS_UNKNOWN". Known values are: "INDEX_JOB_STATUS_UNKNOWN", - "INDEX_JOB_STATUS_PARTIAL", "INDEX_JOB_STATUS_IN_PROGRESS", - "INDEX_JOB_STATUS_COMPLETED", "INDEX_JOB_STATUS_FAILED", - "INDEX_JOB_STATUS_NO_CHANGES", "INDEX_JOB_STATUS_PENDING", and - "INDEX_JOB_STATUS_CANCELLED". - "tokens": 0, # Optional. Number of tokens [This field is - deprecated]. - "total_datasources": 0, # Optional. Number of datasources being - indexed. - "total_tokens": "str", # Optional. Total Tokens Consumed By the - Indexing Job. - "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. - "uuid": "str" # Optional. Unique id. - } + "test_case_uuid": "str" # Optional. Test"u2011case UUID. 
} # response body for status code(s): 404 response == { @@ -232372,14 +242778,13 @@ async def create_indexing_job( """ @distributed_trace_async - async def create_indexing_job( + async def create_evaluation_test_case( self, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Start Indexing Job for a Knowledge Base. + """Create Evaluation Test Case. - To start an indexing job for a knowledge base, send a POST request to - ``/v2/gen-ai/indexing_jobs``. + To create an evaluation test-case send a POST request to ``/v2/gen-ai/evaluation_test_cases``. :param body: Is either a JSON type or a IO[bytes] type. Default value is None. :type body: JSON or IO[bytes] @@ -232392,86 +242797,29 @@ async def create_indexing_job( # JSON input template you can fill out and use as your body input. body = { - "data_source_uuids": [ - "str" # Optional. List of data source ids to index, if none are - provided, all data sources will be indexed. + "agent_workspace_name": "str", # Optional. + "dataset_uuid": "str", # Optional. Dataset against which the test"u2011case + is executed. + "description": "str", # Optional. Description of the test case. + "metrics": [ + "str" # Optional. Full metric list to use for evaluation test case. ], - "knowledge_base_uuid": "str" # Optional. Knowledge base id. + "name": "str", # Optional. Name of the test case. + "star_metric": { + "metric_uuid": "str", # Optional. + "name": "str", # Optional. + "success_threshold": 0.0, # Optional. The success threshold for the + star metric. This is a value that the metric must reach to be considered + successful. + "success_threshold_pct": 0 # Optional. The success threshold for the + star metric. This is a percentage value between 0 and 100. + }, + "workspace_uuid": "str" # Optional. The workspace uuid. } # response body for status code(s): 200 response == { - "job": { - "completed_datasources": 0, # Optional. Number of datasources - indexed completed. 
- "created_at": "2020-02-20 00:00:00", # Optional. Creation date / - time. - "data_source_jobs": [ - { - "completed_at": "2020-02-20 00:00:00", # Optional. - Timestamp when data source completed indexing. - "data_source_uuid": "str", # Optional. Uuid of the - indexed data source. - "error_details": "str", # Optional. A detailed error - description. - "error_msg": "str", # Optional. A string code - provinding a hint which part of the system experienced an error. - "failed_item_count": "str", # Optional. Total count - of files that have failed. - "indexed_file_count": "str", # Optional. Total count - of files that have been indexed. - "indexed_item_count": "str", # Optional. Total count - of files that have been indexed. - "removed_item_count": "str", # Optional. Total count - of files that have been removed. - "skipped_item_count": "str", # Optional. Total count - of files that have been skipped. - "started_at": "2020-02-20 00:00:00", # Optional. - Timestamp when data source started indexing. - "status": "DATA_SOURCE_STATUS_UNKNOWN", # Optional. - Default value is "DATA_SOURCE_STATUS_UNKNOWN". Known values are: - "DATA_SOURCE_STATUS_UNKNOWN", "DATA_SOURCE_STATUS_IN_PROGRESS", - "DATA_SOURCE_STATUS_UPDATED", "DATA_SOURCE_STATUS_PARTIALLY_UPDATED", - "DATA_SOURCE_STATUS_NOT_UPDATED", "DATA_SOURCE_STATUS_FAILED", and - "DATA_SOURCE_STATUS_CANCELLED". - "total_bytes": "str", # Optional. Total size of - files in data source in bytes. - "total_bytes_indexed": "str", # Optional. Total size - of files in data source in bytes that have been indexed. - "total_file_count": "str" # Optional. Total file - count in the data source. - } - ], - "data_source_uuids": [ - "str" # Optional. IndexingJob description. - ], - "finished_at": "2020-02-20 00:00:00", # Optional. IndexingJob - description. - "is_report_available": bool, # Optional. Boolean value to determine - if the indexing job details are available. - "knowledge_base_uuid": "str", # Optional. Knowledge base id. 
- "phase": "BATCH_JOB_PHASE_UNKNOWN", # Optional. Default value is - "BATCH_JOB_PHASE_UNKNOWN". Known values are: "BATCH_JOB_PHASE_UNKNOWN", - "BATCH_JOB_PHASE_PENDING", "BATCH_JOB_PHASE_RUNNING", - "BATCH_JOB_PHASE_SUCCEEDED", "BATCH_JOB_PHASE_FAILED", - "BATCH_JOB_PHASE_ERROR", and "BATCH_JOB_PHASE_CANCELLED". - "started_at": "2020-02-20 00:00:00", # Optional. IndexingJob - description. - "status": "INDEX_JOB_STATUS_UNKNOWN", # Optional. Default value is - "INDEX_JOB_STATUS_UNKNOWN". Known values are: "INDEX_JOB_STATUS_UNKNOWN", - "INDEX_JOB_STATUS_PARTIAL", "INDEX_JOB_STATUS_IN_PROGRESS", - "INDEX_JOB_STATUS_COMPLETED", "INDEX_JOB_STATUS_FAILED", - "INDEX_JOB_STATUS_NO_CHANGES", "INDEX_JOB_STATUS_PENDING", and - "INDEX_JOB_STATUS_CANCELLED". - "tokens": 0, # Optional. Number of tokens [This field is - deprecated]. - "total_datasources": 0, # Optional. Number of datasources being - indexed. - "total_tokens": "str", # Optional. Total Tokens Consumed By the - Indexing Job. - "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. - "uuid": "str" # Optional. Unique id. - } + "test_case_uuid": "str" # Optional. Test"u2011case UUID. } # response body for status code(s): 404 response == { @@ -232517,7 +242865,7 @@ async def create_indexing_job( else: _json = None - _request = build_genai_create_indexing_job_request( + _request = build_genai_create_evaluation_test_case_request( content_type=content_type, json=_json, content=_content, @@ -232580,17 +242928,23 @@ async def create_indexing_job( return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def list_indexing_job_data_sources( - self, indexing_job_uuid: str, **kwargs: Any + async def list_evaluation_runs_by_test_case( + self, + evaluation_test_case_uuid: str, + *, + evaluation_test_case_version: Optional[int] = None, + **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """List Data Sources for Indexing Job for a Knowledge Base. + """List Evaluation Runs by Test Case. 
- To list all datasources for an indexing job, send a GET request to - ``/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/data_sources``. + To list all evaluation runs by test case, send a GET request to + ``/v2/gen-ai/evaluation_test_cases/{evaluation_test_case_uuid}/evaluation_runs``. - :param indexing_job_uuid: Uuid of the indexing job. Required. - :type indexing_job_uuid: str + :param evaluation_test_case_uuid: Evaluation run UUID. Required. + :type evaluation_test_case_uuid: str + :keyword evaluation_test_case_version: Version of the test case. Default value is None. + :paramtype evaluation_test_case_version: int :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -232600,40 +242954,82 @@ async def list_indexing_job_data_sources( # response body for status code(s): 200 response == { - "indexed_data_sources": [ + "evaluation_runs": [ { - "completed_at": "2020-02-20 00:00:00", # Optional. Timestamp - when data source completed indexing. - "data_source_uuid": "str", # Optional. Uuid of the indexed - data source. - "error_details": "str", # Optional. A detailed error + "agent_deleted": bool, # Optional. Whether agent is deleted. + "agent_deployment_name": "str", # Optional. The agent + deployment name. + "agent_name": "str", # Optional. Agent name. + "agent_uuid": "str", # Optional. Agent UUID. + "agent_version_hash": "str", # Optional. Version hash. + "agent_workspace_uuid": "str", # Optional. Agent workspace + uuid. + "created_by_user_email": "str", # Optional. List of + evaluation runs. + "created_by_user_id": "str", # Optional. List of evaluation + runs. + "error_description": "str", # Optional. The error description. - "error_msg": "str", # Optional. A string code provinding a - hint which part of the system experienced an error. - "failed_item_count": "str", # Optional. Total count of files - that have failed. - "indexed_file_count": "str", # Optional. Total count of - files that have been indexed. 
- "indexed_item_count": "str", # Optional. Total count of - files that have been indexed. - "removed_item_count": "str", # Optional. Total count of - files that have been removed. - "skipped_item_count": "str", # Optional. Total count of - files that have been skipped. - "started_at": "2020-02-20 00:00:00", # Optional. Timestamp - when data source started indexing. - "status": "DATA_SOURCE_STATUS_UNKNOWN", # Optional. Default - value is "DATA_SOURCE_STATUS_UNKNOWN". Known values are: - "DATA_SOURCE_STATUS_UNKNOWN", "DATA_SOURCE_STATUS_IN_PROGRESS", - "DATA_SOURCE_STATUS_UPDATED", "DATA_SOURCE_STATUS_PARTIALLY_UPDATED", - "DATA_SOURCE_STATUS_NOT_UPDATED", "DATA_SOURCE_STATUS_FAILED", and - "DATA_SOURCE_STATUS_CANCELLED". - "total_bytes": "str", # Optional. Total size of files in - data source in bytes. - "total_bytes_indexed": "str", # Optional. Total size of - files in data source in bytes that have been indexed. - "total_file_count": "str" # Optional. Total file count in - the data source. + "evaluation_run_uuid": "str", # Optional. Evaluation run + UUID. + "evaluation_test_case_workspace_uuid": "str", # Optional. + Evaluation test case workspace uuid. + "finished_at": "2020-02-20 00:00:00", # Optional. Run end + time. + "pass_status": bool, # Optional. The pass status of the + evaluation run based on the star metric. + "queued_at": "2020-02-20 00:00:00", # Optional. Run queued + time. + "run_level_metric_results": [ + { + "error_description": "str", # Optional. + Error description if the metric could not be calculated. + "metric_name": "str", # Optional. Metric + name. + "metric_value_type": + "METRIC_VALUE_TYPE_UNSPECIFIED", # Optional. Default value is + "METRIC_VALUE_TYPE_UNSPECIFIED". Known values are: + "METRIC_VALUE_TYPE_UNSPECIFIED", "METRIC_VALUE_TYPE_NUMBER", + "METRIC_VALUE_TYPE_STRING", and "METRIC_VALUE_TYPE_PERCENTAGE". + "number_value": 0.0, # Optional. The value + of the metric as a number. + "reasoning": "str", # Optional. 
Reasoning of + the metric result. + "string_value": "str" # Optional. The value + of the metric as a string. + } + ], + "run_name": "str", # Optional. Run name. + "star_metric_result": { + "error_description": "str", # Optional. Error + description if the metric could not be calculated. + "metric_name": "str", # Optional. Metric name. + "metric_value_type": "METRIC_VALUE_TYPE_UNSPECIFIED", + # Optional. Default value is "METRIC_VALUE_TYPE_UNSPECIFIED". Known + values are: "METRIC_VALUE_TYPE_UNSPECIFIED", + "METRIC_VALUE_TYPE_NUMBER", "METRIC_VALUE_TYPE_STRING", and + "METRIC_VALUE_TYPE_PERCENTAGE". + "number_value": 0.0, # Optional. The value of the + metric as a number. + "reasoning": "str", # Optional. Reasoning of the + metric result. + "string_value": "str" # Optional. The value of the + metric as a string. + }, + "started_at": "2020-02-20 00:00:00", # Optional. Run start + time. + "status": "EVALUATION_RUN_STATUS_UNSPECIFIED", # Optional. + Default value is "EVALUATION_RUN_STATUS_UNSPECIFIED". Evaluation Run + Statuses. Known values are: "EVALUATION_RUN_STATUS_UNSPECIFIED", + "EVALUATION_RUN_QUEUED", "EVALUATION_RUN_RUNNING_DATASET", + "EVALUATION_RUN_EVALUATING_RESULTS", "EVALUATION_RUN_CANCELLING", + "EVALUATION_RUN_CANCELLED", "EVALUATION_RUN_SUCCESSFUL", + "EVALUATION_RUN_PARTIALLY_SUCCESSFUL", and "EVALUATION_RUN_FAILED". + "test_case_description": "str", # Optional. Test case + description. + "test_case_name": "str", # Optional. Test case name. + "test_case_uuid": "str", # Optional. Test-case UUID. + "test_case_version": 0 # Optional. Test-case-version. 
} ] } @@ -232667,8 +243063,9 @@ async def list_indexing_job_data_sources( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_genai_list_indexing_job_data_sources_request( - indexing_job_uuid=indexing_job_uuid, + _request = build_genai_list_evaluation_runs_by_test_case_request( + evaluation_test_case_uuid=evaluation_test_case_uuid, + evaluation_test_case_version=evaluation_test_case_version, headers=_headers, params=_params, ) @@ -232728,17 +243125,23 @@ async def list_indexing_job_data_sources( return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def get_indexing_job_details_signed_url( - self, indexing_job_uuid: str, **kwargs: Any + async def get_evaluation_test_case( + self, + test_case_uuid: str, + *, + evaluation_test_case_version: Optional[int] = None, + **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Get Signed URL for Indexing Job Details. + """Retrieve Information About an Existing Evaluation Test Case. - To get a signed URL for indexing job details, send a GET request to - ``/v2/gen-ai/indexing_jobs/{uuid}/details_signed_url``. + To retrive information about an existing evaluation test case, send a GET request to + ``/v2/gen-ai/evaluation_test_case/{test_case_uuid}``. - :param indexing_job_uuid: The uuid of the indexing job. Required. - :type indexing_job_uuid: str + :param test_case_uuid: The test case uuid to retrieve. Required. + :type test_case_uuid: str + :keyword evaluation_test_case_version: Version of the test case. Default value is None. + :paramtype evaluation_test_case_version: int :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -232748,8 +243151,86 @@ async def get_indexing_job_details_signed_url( # response body for status code(s): 200 response == { - "signed_url": "str" # Optional. The signed url for downloading the indexing - job details. + "evaluation_test_case": { + "archived_at": "2020-02-20 00:00:00", # Optional. 
+ "created_at": "2020-02-20 00:00:00", # Optional. + "created_by_user_email": "str", # Optional. + "created_by_user_id": "str", # Optional. + "dataset": { + "created_at": "2020-02-20 00:00:00", # Optional. Time + created at. + "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": "EVALUATION_DATASET_TYPE_UNKNOWN", # + Optional. Default value is "EVALUATION_DATASET_TYPE_UNKNOWN". Known + values are: "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". + "dataset_uuid": "str", # Optional. UUID of the dataset. + "file_size": "str", # Optional. The size of the dataset + uploaded file in bytes. + "has_ground_truth": bool, # Optional. Does the dataset have + a ground truth column?. + "row_count": 0 # Optional. Number of rows in the dataset. + }, + "dataset_name": "str", # Optional. + "dataset_uuid": "str", # Optional. + "description": "str", # Optional. + "latest_version_number_of_runs": 0, # Optional. + "metrics": [ + { + "category": "METRIC_CATEGORY_UNSPECIFIED", # + Optional. Default value is "METRIC_CATEGORY_UNSPECIFIED". Known + values are: "METRIC_CATEGORY_UNSPECIFIED", + "METRIC_CATEGORY_CORRECTNESS", "METRIC_CATEGORY_USER_OUTCOMES", + "METRIC_CATEGORY_SAFETY_AND_SECURITY", + "METRIC_CATEGORY_CONTEXT_QUALITY", and "METRIC_CATEGORY_MODEL_FIT". + "description": "str", # Optional. + "evaluation_scope": "EVALUATION_SCOPE_UNSPECIFIED", + # Optional. Default value is "EVALUATION_SCOPE_UNSPECIFIED". Scope + that determines whether a metric belongs to agent evaluation or model + evaluation. For backwards compatibility, UNSPECIFIED defaults to + agent metrics only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". + "inverted": bool, # Optional. If true, the metric is + inverted, meaning that a lower value is better. + "is_metric_goal": bool, # Optional. 
+ "metric_name": "str", # Optional. + "metric_rank": 0, # Optional. + "metric_type": "METRIC_TYPE_UNSPECIFIED", # + Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values + are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", and + "METRIC_TYPE_MODEL_SAFETY". + "metric_uuid": "str", # Optional. + "metric_value_type": "METRIC_VALUE_TYPE_UNSPECIFIED", + # Optional. Default value is "METRIC_VALUE_TYPE_UNSPECIFIED". Known + values are: "METRIC_VALUE_TYPE_UNSPECIFIED", + "METRIC_VALUE_TYPE_NUMBER", "METRIC_VALUE_TYPE_STRING", and + "METRIC_VALUE_TYPE_PERCENTAGE". + "range_max": 0.0, # Optional. The maximum value for + the metric. + "range_min": 0.0 # Optional. The minimum value for + the metric. + } + ], + "name": "str", # Optional. + "star_metric": { + "metric_uuid": "str", # Optional. + "name": "str", # Optional. + "success_threshold": 0.0, # Optional. The success threshold + for the star metric. This is a value that the metric must reach to be + considered successful. + "success_threshold_pct": 0 # Optional. The success threshold + for the star metric. This is a percentage value between 0 and 100. + }, + "test_case_uuid": "str", # Optional. + "total_runs": 0, # Optional. + "updated_at": "2020-02-20 00:00:00", # Optional. + "updated_by_user_email": "str", # Optional. + "updated_by_user_id": "str", # Optional. + "version": 0 # Optional. 
+ } } # response body for status code(s): 404 response == { @@ -232781,8 +243262,9 @@ async def get_indexing_job_details_signed_url( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_genai_get_indexing_job_details_signed_url_request( - indexing_job_uuid=indexing_job_uuid, + _request = build_genai_get_evaluation_test_case_request( + test_case_uuid=test_case_uuid, + evaluation_test_case_version=evaluation_test_case_version, headers=_headers, params=_params, ) @@ -232841,16 +243323,140 @@ async def get_indexing_job_details_signed_url( return cast(JSON, deserialized) # type: ignore + @overload + async def update_evaluation_test_case( + self, + test_case_uuid: str, + body: Optional[JSON] = None, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Update an Evaluation Test Case. + + To update an evaluation test-case send a PUT request to + ``/v2/gen-ai/evaluation_test_cases/{test_case_uuid}``. + + :param test_case_uuid: Test-case UUID to update. Required. + :type test_case_uuid: str + :param body: Default value is None. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "dataset_uuid": "str", # Optional. Dataset against which the test"u2011case + is executed. + "description": "str", # Optional. Description of the test case. + "metrics": { + "metric_uuids": [ + "str" # Optional. + ] + }, + "name": "str", # Optional. Name of the test case. + "star_metric": { + "metric_uuid": "str", # Optional. + "name": "str", # Optional. + "success_threshold": 0.0, # Optional. The success threshold for the + star metric. 
This is a value that the metric must reach to be considered + successful. + "success_threshold_pct": 0 # Optional. The success threshold for the + star metric. This is a percentage value between 0 and 100. + }, + "test_case_uuid": "str" # Optional. Test-case UUID to update. + } + + # response body for status code(s): 200 + response == { + "test_case_uuid": "str", # Optional. + "version": 0 # Optional. The new verson of the test case. + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + async def update_evaluation_test_case( + self, + test_case_uuid: str, + body: Optional[IO[bytes]] = None, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Update an Evaluation Test Case. + + To update an evaluation test-case send a PUT request to + ``/v2/gen-ai/evaluation_test_cases/{test_case_uuid}``. + + :param test_case_uuid: Test-case UUID to update. Required. + :type test_case_uuid: str + :param body: Default value is None. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "test_case_uuid": "str", # Optional. + "version": 0 # Optional. 
The new verson of the test case. + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + @distributed_trace_async - async def get_indexing_job(self, uuid: str, **kwargs: Any) -> JSON: + async def update_evaluation_test_case( + self, + test_case_uuid: str, + body: Optional[Union[JSON, IO[bytes]]] = None, + **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """Retrieve Status of Indexing Job for a Knowledge Base. + """Update an Evaluation Test Case. - To get status of an indexing Job for a knowledge base, send a GET request to - ``/v2/gen-ai/indexing_jobs/{uuid}``. + To update an evaluation test-case send a PUT request to + ``/v2/gen-ai/evaluation_test_cases/{test_case_uuid}``. - :param uuid: Indexing job id. Required. - :type uuid: str + :param test_case_uuid: Test-case UUID to update. Required. + :type test_case_uuid: str + :param body: Is either a JSON type or a IO[bytes] type. Default value is None. + :type body: JSON or IO[bytes] :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -232858,78 +243464,258 @@ async def get_indexing_job(self, uuid: str, **kwargs: Any) -> JSON: Example: .. code-block:: python + # JSON input template you can fill out and use as your body input. + body = { + "dataset_uuid": "str", # Optional. Dataset against which the test"u2011case + is executed. + "description": "str", # Optional. Description of the test case. 
+ "metrics": { + "metric_uuids": [ + "str" # Optional. + ] + }, + "name": "str", # Optional. Name of the test case. + "star_metric": { + "metric_uuid": "str", # Optional. + "name": "str", # Optional. + "success_threshold": 0.0, # Optional. The success threshold for the + star metric. This is a value that the metric must reach to be considered + successful. + "success_threshold_pct": 0 # Optional. The success threshold for the + star metric. This is a percentage value between 0 and 100. + }, + "test_case_uuid": "str" # Optional. Test-case UUID to update. + } + # response body for status code(s): 200 response == { - "job": { - "completed_datasources": 0, # Optional. Number of datasources - indexed completed. - "created_at": "2020-02-20 00:00:00", # Optional. Creation date / - time. - "data_source_jobs": [ - { - "completed_at": "2020-02-20 00:00:00", # Optional. - Timestamp when data source completed indexing. - "data_source_uuid": "str", # Optional. Uuid of the - indexed data source. - "error_details": "str", # Optional. A detailed error - description. - "error_msg": "str", # Optional. A string code - provinding a hint which part of the system experienced an error. - "failed_item_count": "str", # Optional. Total count - of files that have failed. - "indexed_file_count": "str", # Optional. Total count - of files that have been indexed. - "indexed_item_count": "str", # Optional. Total count - of files that have been indexed. - "removed_item_count": "str", # Optional. Total count - of files that have been removed. - "skipped_item_count": "str", # Optional. Total count - of files that have been skipped. - "started_at": "2020-02-20 00:00:00", # Optional. - Timestamp when data source started indexing. - "status": "DATA_SOURCE_STATUS_UNKNOWN", # Optional. - Default value is "DATA_SOURCE_STATUS_UNKNOWN". 
Known values are: - "DATA_SOURCE_STATUS_UNKNOWN", "DATA_SOURCE_STATUS_IN_PROGRESS", - "DATA_SOURCE_STATUS_UPDATED", "DATA_SOURCE_STATUS_PARTIALLY_UPDATED", - "DATA_SOURCE_STATUS_NOT_UPDATED", "DATA_SOURCE_STATUS_FAILED", and - "DATA_SOURCE_STATUS_CANCELLED". - "total_bytes": "str", # Optional. Total size of - files in data source in bytes. - "total_bytes_indexed": "str", # Optional. Total size - of files in data source in bytes that have been indexed. - "total_file_count": "str" # Optional. Total file - count in the data source. - } - ], - "data_source_uuids": [ - "str" # Optional. IndexingJob description. - ], - "finished_at": "2020-02-20 00:00:00", # Optional. IndexingJob - description. - "is_report_available": bool, # Optional. Boolean value to determine - if the indexing job details are available. - "knowledge_base_uuid": "str", # Optional. Knowledge base id. - "phase": "BATCH_JOB_PHASE_UNKNOWN", # Optional. Default value is - "BATCH_JOB_PHASE_UNKNOWN". Known values are: "BATCH_JOB_PHASE_UNKNOWN", - "BATCH_JOB_PHASE_PENDING", "BATCH_JOB_PHASE_RUNNING", - "BATCH_JOB_PHASE_SUCCEEDED", "BATCH_JOB_PHASE_FAILED", - "BATCH_JOB_PHASE_ERROR", and "BATCH_JOB_PHASE_CANCELLED". - "started_at": "2020-02-20 00:00:00", # Optional. IndexingJob - description. - "status": "INDEX_JOB_STATUS_UNKNOWN", # Optional. Default value is - "INDEX_JOB_STATUS_UNKNOWN". Known values are: "INDEX_JOB_STATUS_UNKNOWN", - "INDEX_JOB_STATUS_PARTIAL", "INDEX_JOB_STATUS_IN_PROGRESS", - "INDEX_JOB_STATUS_COMPLETED", "INDEX_JOB_STATUS_FAILED", - "INDEX_JOB_STATUS_NO_CHANGES", "INDEX_JOB_STATUS_PENDING", and - "INDEX_JOB_STATUS_CANCELLED". - "tokens": 0, # Optional. Number of tokens [This field is - deprecated]. - "total_datasources": 0, # Optional. Number of datasources being - indexed. - "total_tokens": "str", # Optional. Total Tokens Consumed By the - Indexing Job. - "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. - "uuid": "str" # Optional. Unique id. 
+ "test_case_uuid": "str", # Optional. + "version": 0 # Optional. The new verson of the test case. + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[JSON] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + if body is not None: + _json = body + else: + _json = None + + _request = build_genai_update_evaluation_test_case_request( + test_case_uuid=test_case_uuid, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = 
pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace_async + async def list_indexing_jobs( + self, + *, + page: Optional[int] = None, + per_page: Optional[int] = None, + **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """List Indexing Jobs for a Knowledge Base. + + To list all indexing jobs for a knowledge base, send a GET request to + ``/v2/gen-ai/indexing_jobs``. + + :keyword page: Page number. Default value is None. + :paramtype page: int + :keyword per_page: Items per page. Default value is None. 
+ :paramtype per_page: int + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "jobs": [ + { + "completed_datasources": 0, # Optional. Number of + datasources indexed completed. + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "data_source_jobs": [ + { + "completed_at": "2020-02-20 00:00:00", # + Optional. Timestamp when data source completed indexing. + "data_source_uuid": "str", # Optional. Uuid + of the indexed data source. + "error_details": "str", # Optional. A + detailed error description. + "error_msg": "str", # Optional. A string + code provinding a hint which part of the system experienced an + error. + "failed_item_count": "str", # Optional. + Total count of files that have failed. + "indexed_file_count": "str", # Optional. + Total count of files that have been indexed. + "indexed_item_count": "str", # Optional. + Total count of files that have been indexed. + "removed_item_count": "str", # Optional. + Total count of files that have been removed. + "skipped_item_count": "str", # Optional. + Total count of files that have been skipped. + "started_at": "2020-02-20 00:00:00", # + Optional. Timestamp when data source started indexing. + "status": "DATA_SOURCE_STATUS_UNKNOWN", # + Optional. Default value is "DATA_SOURCE_STATUS_UNKNOWN". Known + values are: "DATA_SOURCE_STATUS_UNKNOWN", + "DATA_SOURCE_STATUS_IN_PROGRESS", "DATA_SOURCE_STATUS_UPDATED", + "DATA_SOURCE_STATUS_PARTIALLY_UPDATED", + "DATA_SOURCE_STATUS_NOT_UPDATED", "DATA_SOURCE_STATUS_FAILED", + and "DATA_SOURCE_STATUS_CANCELLED". + "total_bytes": "str", # Optional. Total size + of files in data source in bytes. + "total_bytes_indexed": "str", # Optional. + Total size of files in data source in bytes that have been + indexed. + "total_file_count": "str" # Optional. Total + file count in the data source. 
+ } + ], + "data_source_uuids": [ + "str" # Optional. The indexing jobs. + ], + "finished_at": "2020-02-20 00:00:00", # Optional. The + indexing jobs. + "is_report_available": bool, # Optional. Boolean value to + determine if the indexing job details are available. + "knowledge_base_uuid": "str", # Optional. Knowledge base id. + "phase": "BATCH_JOB_PHASE_UNKNOWN", # Optional. Default + value is "BATCH_JOB_PHASE_UNKNOWN". Known values are: + "BATCH_JOB_PHASE_UNKNOWN", "BATCH_JOB_PHASE_PENDING", + "BATCH_JOB_PHASE_RUNNING", "BATCH_JOB_PHASE_SUCCEEDED", + "BATCH_JOB_PHASE_FAILED", "BATCH_JOB_PHASE_ERROR", and + "BATCH_JOB_PHASE_CANCELLED". + "started_at": "2020-02-20 00:00:00", # Optional. The + indexing jobs. + "status": "INDEX_JOB_STATUS_UNKNOWN", # Optional. Default + value is "INDEX_JOB_STATUS_UNKNOWN". Known values are: + "INDEX_JOB_STATUS_UNKNOWN", "INDEX_JOB_STATUS_PARTIAL", + "INDEX_JOB_STATUS_IN_PROGRESS", "INDEX_JOB_STATUS_COMPLETED", + "INDEX_JOB_STATUS_FAILED", "INDEX_JOB_STATUS_NO_CHANGES", + "INDEX_JOB_STATUS_PENDING", and "INDEX_JOB_STATUS_CANCELLED". + "tokens": 0, # Optional. Number of tokens [This field is + deprecated]. + "total_datasources": 0, # Optional. Number of datasources + being indexed. + "total_tokens": "str", # Optional. Total Tokens Consumed By + the Indexing Job. + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + } + ], + "links": { + "pages": { + "first": "str", # Optional. First page. + "last": "str", # Optional. Last page. + "next": "str", # Optional. Next page. + "previous": "str" # Optional. Previous page. + } + }, + "meta": { + "page": 0, # Optional. The current page. + "pages": 0, # Optional. Total number of pages. + "total": 0 # Optional. Total amount of items over all pages. 
} } # response body for status code(s): 404 @@ -232962,8 +243748,9 @@ async def get_indexing_job(self, uuid: str, **kwargs: Any) -> JSON: cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_genai_get_indexing_job_request( - uuid=uuid, + _request = build_genai_list_indexing_jobs_request( + page=page, + per_page=per_page, headers=_headers, params=_params, ) @@ -233023,22 +243810,19 @@ async def get_indexing_job(self, uuid: str, **kwargs: Any) -> JSON: return cast(JSON, deserialized) # type: ignore @overload - async def cancel_indexing_job( + async def create_indexing_job( self, - uuid: str, body: Optional[JSON] = None, *, content_type: str = "application/json", **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Cancel Indexing Job for a Knowledge Base. + """Start Indexing Job for a Knowledge Base. - To cancel an indexing job for a knowledge base, send a PUT request to - ``/v2/gen-ai/indexing_jobs/{uuid}/cancel``. + To start an indexing job for a knowledge base, send a POST request to + ``/v2/gen-ai/indexing_jobs``. - :param uuid: A unique identifier for an indexing job. Required. - :type uuid: str :param body: Default value is None. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. @@ -233053,7 +243837,11 @@ async def cancel_indexing_job( # JSON input template you can fill out and use as your body input. body = { - "uuid": "str" # Optional. A unique identifier for an indexing job. + "data_source_uuids": [ + "str" # Optional. List of data source ids to index, if none are + provided, all data sources will be indexed. + ], + "knowledge_base_uuid": "str" # Optional. Knowledge base id. 
} # response body for status code(s): 200 @@ -233144,22 +243932,19 @@ async def cancel_indexing_job( """ @overload - async def cancel_indexing_job( + async def create_indexing_job( self, - uuid: str, body: Optional[IO[bytes]] = None, *, content_type: str = "application/json", **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Cancel Indexing Job for a Knowledge Base. + """Start Indexing Job for a Knowledge Base. - To cancel an indexing job for a knowledge base, send a PUT request to - ``/v2/gen-ai/indexing_jobs/{uuid}/cancel``. + To start an indexing job for a knowledge base, send a POST request to + ``/v2/gen-ai/indexing_jobs``. - :param uuid: A unique identifier for an indexing job. Required. - :type uuid: str :param body: Default value is None. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. @@ -233260,17 +244045,15 @@ async def cancel_indexing_job( """ @distributed_trace_async - async def cancel_indexing_job( - self, uuid: str, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any + async def create_indexing_job( + self, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Cancel Indexing Job for a Knowledge Base. + """Start Indexing Job for a Knowledge Base. - To cancel an indexing job for a knowledge base, send a PUT request to - ``/v2/gen-ai/indexing_jobs/{uuid}/cancel``. + To start an indexing job for a knowledge base, send a POST request to + ``/v2/gen-ai/indexing_jobs``. - :param uuid: A unique identifier for an indexing job. Required. - :type uuid: str :param body: Is either a JSON type or a IO[bytes] type. Default value is None. :type body: JSON or IO[bytes] :return: JSON object @@ -233282,7 +244065,11 @@ async def cancel_indexing_job( # JSON input template you can fill out and use as your body input. body = { - "uuid": "str" # Optional. A unique identifier for an indexing job. 
+ "data_source_uuids": [ + "str" # Optional. List of data source ids to index, if none are + provided, all data sources will be indexed. + ], + "knowledge_base_uuid": "str" # Optional. Knowledge base id. } # response body for status code(s): 200 @@ -233403,8 +244190,7 @@ async def cancel_indexing_job( else: _json = None - _request = build_genai_cancel_indexing_job_request( - uuid=uuid, + _request = build_genai_create_indexing_job_request( content_type=content_type, json=_json, content=_content, @@ -233467,22 +244253,17 @@ async def cancel_indexing_job( return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def list_knowledge_bases( - self, - *, - page: Optional[int] = None, - per_page: Optional[int] = None, - **kwargs: Any + async def list_indexing_job_data_sources( + self, indexing_job_uuid: str, **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """List Knowledge Bases. + """List Data Sources for Indexing Job for a Knowledge Base. - To list all knowledge bases, send a GET request to ``/v2/gen-ai/knowledge_bases``. + To list all datasources for an indexing job, send a GET request to + ``/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/data_sources``. - :keyword page: Page number. Default value is None. - :paramtype page: int - :keyword per_page: Items per page. Default value is None. - :paramtype per_page: int + :param indexing_job_uuid: Uuid of the indexing job. Required. + :type indexing_job_uuid: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -233492,125 +244273,42 @@ async def list_knowledge_bases( # response body for status code(s): 200 response == { - "knowledge_bases": [ + "indexed_data_sources": [ { - "added_to_agent_at": "2020-02-20 00:00:00", # Optional. Time - when the knowledge base was added to the agent. - "created_at": "2020-02-20 00:00:00", # Optional. Creation - date / time. - "database_id": "str", # Optional. The knowledge bases. - "embedding_model_uuid": "str", # Optional. 
The knowledge - bases. - "is_public": bool, # Optional. Whether the knowledge base is - public or not. - "last_indexing_job": { - "completed_datasources": 0, # Optional. Number of - datasources indexed completed. - "created_at": "2020-02-20 00:00:00", # Optional. - Creation date / time. - "data_source_jobs": [ - { - "completed_at": "2020-02-20 - 00:00:00", # Optional. Timestamp when data source completed - indexing. - "data_source_uuid": "str", # - Optional. Uuid of the indexed data source. - "error_details": "str", # Optional. - A detailed error description. - "error_msg": "str", # Optional. A - string code provinding a hint which part of the system - experienced an error. - "failed_item_count": "str", # - Optional. Total count of files that have failed. - "indexed_file_count": "str", # - Optional. Total count of files that have been indexed. - "indexed_item_count": "str", # - Optional. Total count of files that have been indexed. - "removed_item_count": "str", # - Optional. Total count of files that have been removed. - "skipped_item_count": "str", # - Optional. Total count of files that have been skipped. - "started_at": "2020-02-20 00:00:00", - # Optional. Timestamp when data source started indexing. - "status": - "DATA_SOURCE_STATUS_UNKNOWN", # Optional. Default value is - "DATA_SOURCE_STATUS_UNKNOWN". Known values are: - "DATA_SOURCE_STATUS_UNKNOWN", - "DATA_SOURCE_STATUS_IN_PROGRESS", - "DATA_SOURCE_STATUS_UPDATED", - "DATA_SOURCE_STATUS_PARTIALLY_UPDATED", - "DATA_SOURCE_STATUS_NOT_UPDATED", - "DATA_SOURCE_STATUS_FAILED", and - "DATA_SOURCE_STATUS_CANCELLED". - "total_bytes": "str", # Optional. - Total size of files in data source in bytes. - "total_bytes_indexed": "str", # - Optional. Total size of files in data source in bytes that - have been indexed. - "total_file_count": "str" # - Optional. Total file count in the data source. - } - ], - "data_source_uuids": [ - "str" # Optional. IndexingJob description. 
- ], - "finished_at": "2020-02-20 00:00:00", # Optional. - IndexingJob description. - "is_report_available": bool, # Optional. Boolean - value to determine if the indexing job details are available. - "knowledge_base_uuid": "str", # Optional. Knowledge - base id. - "phase": "BATCH_JOB_PHASE_UNKNOWN", # Optional. - Default value is "BATCH_JOB_PHASE_UNKNOWN". Known values are: - "BATCH_JOB_PHASE_UNKNOWN", "BATCH_JOB_PHASE_PENDING", - "BATCH_JOB_PHASE_RUNNING", "BATCH_JOB_PHASE_SUCCEEDED", - "BATCH_JOB_PHASE_FAILED", "BATCH_JOB_PHASE_ERROR", and - "BATCH_JOB_PHASE_CANCELLED". - "started_at": "2020-02-20 00:00:00", # Optional. - IndexingJob description. - "status": "INDEX_JOB_STATUS_UNKNOWN", # Optional. - Default value is "INDEX_JOB_STATUS_UNKNOWN". Known values are: - "INDEX_JOB_STATUS_UNKNOWN", "INDEX_JOB_STATUS_PARTIAL", - "INDEX_JOB_STATUS_IN_PROGRESS", "INDEX_JOB_STATUS_COMPLETED", - "INDEX_JOB_STATUS_FAILED", "INDEX_JOB_STATUS_NO_CHANGES", - "INDEX_JOB_STATUS_PENDING", and "INDEX_JOB_STATUS_CANCELLED". - "tokens": 0, # Optional. Number of tokens [This - field is deprecated]. - "total_datasources": 0, # Optional. Number of - datasources being indexed. - "total_tokens": "str", # Optional. Total Tokens - Consumed By the Indexing Job. - "updated_at": "2020-02-20 00:00:00", # Optional. - Last modified. - "uuid": "str" # Optional. Unique id. - }, - "name": "str", # Optional. Name of knowledge base. - "project_id": "str", # Optional. The knowledge bases. - "region": "str", # Optional. Region code. - "tags": [ - "str" # Optional. Tags to organize related - resources. - ], - "updated_at": "2020-02-20 00:00:00", # Optional. Last - modified. - "user_id": "str", # Optional. Id of user that created the - knowledge base. - "uuid": "str" # Optional. Unique id for knowledge base. - } - ], - "links": { - "pages": { - "first": "str", # Optional. First page. - "last": "str", # Optional. Last page. - "next": "str", # Optional. Next page. - "previous": "str" # Optional. 
Previous page. + "completed_at": "2020-02-20 00:00:00", # Optional. Timestamp + when data source completed indexing. + "data_source_uuid": "str", # Optional. Uuid of the indexed + data source. + "error_details": "str", # Optional. A detailed error + description. + "error_msg": "str", # Optional. A string code provinding a + hint which part of the system experienced an error. + "failed_item_count": "str", # Optional. Total count of files + that have failed. + "indexed_file_count": "str", # Optional. Total count of + files that have been indexed. + "indexed_item_count": "str", # Optional. Total count of + files that have been indexed. + "removed_item_count": "str", # Optional. Total count of + files that have been removed. + "skipped_item_count": "str", # Optional. Total count of + files that have been skipped. + "started_at": "2020-02-20 00:00:00", # Optional. Timestamp + when data source started indexing. + "status": "DATA_SOURCE_STATUS_UNKNOWN", # Optional. Default + value is "DATA_SOURCE_STATUS_UNKNOWN". Known values are: + "DATA_SOURCE_STATUS_UNKNOWN", "DATA_SOURCE_STATUS_IN_PROGRESS", + "DATA_SOURCE_STATUS_UPDATED", "DATA_SOURCE_STATUS_PARTIALLY_UPDATED", + "DATA_SOURCE_STATUS_NOT_UPDATED", "DATA_SOURCE_STATUS_FAILED", and + "DATA_SOURCE_STATUS_CANCELLED". + "total_bytes": "str", # Optional. Total size of files in + data source in bytes. + "total_bytes_indexed": "str", # Optional. Total size of + files in data source in bytes that have been indexed. + "total_file_count": "str" # Optional. Total file count in + the data source. } - }, - "meta": { - "page": 0, # Optional. The current page. - "pages": 0, # Optional. Total number of pages. - "total": 0 # Optional. Total amount of items over all pages. 
- } + ] } # response body for status code(s): 404 response == { @@ -233642,9 +244340,990 @@ async def list_knowledge_bases( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_genai_list_knowledge_bases_request( - page=page, - per_page=per_page, + _request = build_genai_list_indexing_job_data_sources_request( + indexing_job_uuid=indexing_job_uuid, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), 
response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace_async + async def get_indexing_job_details_signed_url( + self, indexing_job_uuid: str, **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Get Signed URL for Indexing Job Details. + + To get a signed URL for indexing job details, send a GET request to + ``/v2/gen-ai/indexing_jobs/{uuid}/details_signed_url``. + + :param indexing_job_uuid: The uuid of the indexing job. Required. + :type indexing_job_uuid: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "signed_url": "str" # Optional. The signed url for downloading the indexing + job details. + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_genai_get_indexing_job_details_signed_url_request( + indexing_job_uuid=indexing_job_uuid, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + 
response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace_async + async def get_indexing_job(self, uuid: str, **kwargs: Any) -> JSON: + # pylint: disable=line-too-long + """Retrieve Status of Indexing Job for a Knowledge Base. + + To get status of an indexing Job for a knowledge base, send a GET request to + ``/v2/gen-ai/indexing_jobs/{uuid}``. + + :param uuid: Indexing job id. Required. + :type uuid: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "job": { + "completed_datasources": 0, # Optional. Number of datasources + indexed completed. + "created_at": "2020-02-20 00:00:00", # Optional. Creation date / + time. + "data_source_jobs": [ + { + "completed_at": "2020-02-20 00:00:00", # Optional. + Timestamp when data source completed indexing. + "data_source_uuid": "str", # Optional. Uuid of the + indexed data source. + "error_details": "str", # Optional. A detailed error + description. + "error_msg": "str", # Optional. A string code + provinding a hint which part of the system experienced an error. + "failed_item_count": "str", # Optional. Total count + of files that have failed. + "indexed_file_count": "str", # Optional. Total count + of files that have been indexed. + "indexed_item_count": "str", # Optional. Total count + of files that have been indexed. + "removed_item_count": "str", # Optional. Total count + of files that have been removed. + "skipped_item_count": "str", # Optional. Total count + of files that have been skipped. + "started_at": "2020-02-20 00:00:00", # Optional. 
+ Timestamp when data source started indexing. + "status": "DATA_SOURCE_STATUS_UNKNOWN", # Optional. + Default value is "DATA_SOURCE_STATUS_UNKNOWN". Known values are: + "DATA_SOURCE_STATUS_UNKNOWN", "DATA_SOURCE_STATUS_IN_PROGRESS", + "DATA_SOURCE_STATUS_UPDATED", "DATA_SOURCE_STATUS_PARTIALLY_UPDATED", + "DATA_SOURCE_STATUS_NOT_UPDATED", "DATA_SOURCE_STATUS_FAILED", and + "DATA_SOURCE_STATUS_CANCELLED". + "total_bytes": "str", # Optional. Total size of + files in data source in bytes. + "total_bytes_indexed": "str", # Optional. Total size + of files in data source in bytes that have been indexed. + "total_file_count": "str" # Optional. Total file + count in the data source. + } + ], + "data_source_uuids": [ + "str" # Optional. IndexingJob description. + ], + "finished_at": "2020-02-20 00:00:00", # Optional. IndexingJob + description. + "is_report_available": bool, # Optional. Boolean value to determine + if the indexing job details are available. + "knowledge_base_uuid": "str", # Optional. Knowledge base id. + "phase": "BATCH_JOB_PHASE_UNKNOWN", # Optional. Default value is + "BATCH_JOB_PHASE_UNKNOWN". Known values are: "BATCH_JOB_PHASE_UNKNOWN", + "BATCH_JOB_PHASE_PENDING", "BATCH_JOB_PHASE_RUNNING", + "BATCH_JOB_PHASE_SUCCEEDED", "BATCH_JOB_PHASE_FAILED", + "BATCH_JOB_PHASE_ERROR", and "BATCH_JOB_PHASE_CANCELLED". + "started_at": "2020-02-20 00:00:00", # Optional. IndexingJob + description. + "status": "INDEX_JOB_STATUS_UNKNOWN", # Optional. Default value is + "INDEX_JOB_STATUS_UNKNOWN". Known values are: "INDEX_JOB_STATUS_UNKNOWN", + "INDEX_JOB_STATUS_PARTIAL", "INDEX_JOB_STATUS_IN_PROGRESS", + "INDEX_JOB_STATUS_COMPLETED", "INDEX_JOB_STATUS_FAILED", + "INDEX_JOB_STATUS_NO_CHANGES", "INDEX_JOB_STATUS_PENDING", and + "INDEX_JOB_STATUS_CANCELLED". + "tokens": 0, # Optional. Number of tokens [This field is + deprecated]. + "total_datasources": 0, # Optional. Number of datasources being + indexed. + "total_tokens": "str", # Optional. 
Total Tokens Consumed By the + Indexing Job. + "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. + "uuid": "str" # Optional. Unique id. + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_genai_get_indexing_job_request( + uuid=uuid, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + 
response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @overload + async def cancel_indexing_job( + self, + uuid: str, + body: Optional[JSON] = None, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Cancel Indexing Job for a Knowledge Base. + + To cancel an indexing job for a knowledge base, send a PUT request to + ``/v2/gen-ai/indexing_jobs/{uuid}/cancel``. + + :param uuid: A unique identifier for an indexing job. Required. + :type uuid: str + :param body: Default value is None. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "uuid": "str" # Optional. 
A unique identifier for an indexing job. + } + + # response body for status code(s): 200 + response == { + "job": { + "completed_datasources": 0, # Optional. Number of datasources + indexed completed. + "created_at": "2020-02-20 00:00:00", # Optional. Creation date / + time. + "data_source_jobs": [ + { + "completed_at": "2020-02-20 00:00:00", # Optional. + Timestamp when data source completed indexing. + "data_source_uuid": "str", # Optional. Uuid of the + indexed data source. + "error_details": "str", # Optional. A detailed error + description. + "error_msg": "str", # Optional. A string code + provinding a hint which part of the system experienced an error. + "failed_item_count": "str", # Optional. Total count + of files that have failed. + "indexed_file_count": "str", # Optional. Total count + of files that have been indexed. + "indexed_item_count": "str", # Optional. Total count + of files that have been indexed. + "removed_item_count": "str", # Optional. Total count + of files that have been removed. + "skipped_item_count": "str", # Optional. Total count + of files that have been skipped. + "started_at": "2020-02-20 00:00:00", # Optional. + Timestamp when data source started indexing. + "status": "DATA_SOURCE_STATUS_UNKNOWN", # Optional. + Default value is "DATA_SOURCE_STATUS_UNKNOWN". Known values are: + "DATA_SOURCE_STATUS_UNKNOWN", "DATA_SOURCE_STATUS_IN_PROGRESS", + "DATA_SOURCE_STATUS_UPDATED", "DATA_SOURCE_STATUS_PARTIALLY_UPDATED", + "DATA_SOURCE_STATUS_NOT_UPDATED", "DATA_SOURCE_STATUS_FAILED", and + "DATA_SOURCE_STATUS_CANCELLED". + "total_bytes": "str", # Optional. Total size of + files in data source in bytes. + "total_bytes_indexed": "str", # Optional. Total size + of files in data source in bytes that have been indexed. + "total_file_count": "str" # Optional. Total file + count in the data source. + } + ], + "data_source_uuids": [ + "str" # Optional. IndexingJob description. + ], + "finished_at": "2020-02-20 00:00:00", # Optional. 
IndexingJob + description. + "is_report_available": bool, # Optional. Boolean value to determine + if the indexing job details are available. + "knowledge_base_uuid": "str", # Optional. Knowledge base id. + "phase": "BATCH_JOB_PHASE_UNKNOWN", # Optional. Default value is + "BATCH_JOB_PHASE_UNKNOWN". Known values are: "BATCH_JOB_PHASE_UNKNOWN", + "BATCH_JOB_PHASE_PENDING", "BATCH_JOB_PHASE_RUNNING", + "BATCH_JOB_PHASE_SUCCEEDED", "BATCH_JOB_PHASE_FAILED", + "BATCH_JOB_PHASE_ERROR", and "BATCH_JOB_PHASE_CANCELLED". + "started_at": "2020-02-20 00:00:00", # Optional. IndexingJob + description. + "status": "INDEX_JOB_STATUS_UNKNOWN", # Optional. Default value is + "INDEX_JOB_STATUS_UNKNOWN". Known values are: "INDEX_JOB_STATUS_UNKNOWN", + "INDEX_JOB_STATUS_PARTIAL", "INDEX_JOB_STATUS_IN_PROGRESS", + "INDEX_JOB_STATUS_COMPLETED", "INDEX_JOB_STATUS_FAILED", + "INDEX_JOB_STATUS_NO_CHANGES", "INDEX_JOB_STATUS_PENDING", and + "INDEX_JOB_STATUS_CANCELLED". + "tokens": 0, # Optional. Number of tokens [This field is + deprecated]. + "total_datasources": 0, # Optional. Number of datasources being + indexed. + "total_tokens": "str", # Optional. Total Tokens Consumed By the + Indexing Job. + "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. + "uuid": "str" # Optional. Unique id. + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + + @overload + async def cancel_indexing_job( + self, + uuid: str, + body: Optional[IO[bytes]] = None, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Cancel Indexing Job for a Knowledge Base. + + To cancel an indexing job for a knowledge base, send a PUT request to + ``/v2/gen-ai/indexing_jobs/{uuid}/cancel``. + + :param uuid: A unique identifier for an indexing job. Required. + :type uuid: str + :param body: Default value is None. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "job": { + "completed_datasources": 0, # Optional. Number of datasources + indexed completed. + "created_at": "2020-02-20 00:00:00", # Optional. Creation date / + time. + "data_source_jobs": [ + { + "completed_at": "2020-02-20 00:00:00", # Optional. + Timestamp when data source completed indexing. + "data_source_uuid": "str", # Optional. Uuid of the + indexed data source. + "error_details": "str", # Optional. A detailed error + description. + "error_msg": "str", # Optional. A string code + provinding a hint which part of the system experienced an error. + "failed_item_count": "str", # Optional. Total count + of files that have failed. + "indexed_file_count": "str", # Optional. Total count + of files that have been indexed. + "indexed_item_count": "str", # Optional. Total count + of files that have been indexed. + "removed_item_count": "str", # Optional. Total count + of files that have been removed. + "skipped_item_count": "str", # Optional. Total count + of files that have been skipped. + "started_at": "2020-02-20 00:00:00", # Optional. + Timestamp when data source started indexing. 
+ "status": "DATA_SOURCE_STATUS_UNKNOWN", # Optional. + Default value is "DATA_SOURCE_STATUS_UNKNOWN". Known values are: + "DATA_SOURCE_STATUS_UNKNOWN", "DATA_SOURCE_STATUS_IN_PROGRESS", + "DATA_SOURCE_STATUS_UPDATED", "DATA_SOURCE_STATUS_PARTIALLY_UPDATED", + "DATA_SOURCE_STATUS_NOT_UPDATED", "DATA_SOURCE_STATUS_FAILED", and + "DATA_SOURCE_STATUS_CANCELLED". + "total_bytes": "str", # Optional. Total size of + files in data source in bytes. + "total_bytes_indexed": "str", # Optional. Total size + of files in data source in bytes that have been indexed. + "total_file_count": "str" # Optional. Total file + count in the data source. + } + ], + "data_source_uuids": [ + "str" # Optional. IndexingJob description. + ], + "finished_at": "2020-02-20 00:00:00", # Optional. IndexingJob + description. + "is_report_available": bool, # Optional. Boolean value to determine + if the indexing job details are available. + "knowledge_base_uuid": "str", # Optional. Knowledge base id. + "phase": "BATCH_JOB_PHASE_UNKNOWN", # Optional. Default value is + "BATCH_JOB_PHASE_UNKNOWN". Known values are: "BATCH_JOB_PHASE_UNKNOWN", + "BATCH_JOB_PHASE_PENDING", "BATCH_JOB_PHASE_RUNNING", + "BATCH_JOB_PHASE_SUCCEEDED", "BATCH_JOB_PHASE_FAILED", + "BATCH_JOB_PHASE_ERROR", and "BATCH_JOB_PHASE_CANCELLED". + "started_at": "2020-02-20 00:00:00", # Optional. IndexingJob + description. + "status": "INDEX_JOB_STATUS_UNKNOWN", # Optional. Default value is + "INDEX_JOB_STATUS_UNKNOWN". Known values are: "INDEX_JOB_STATUS_UNKNOWN", + "INDEX_JOB_STATUS_PARTIAL", "INDEX_JOB_STATUS_IN_PROGRESS", + "INDEX_JOB_STATUS_COMPLETED", "INDEX_JOB_STATUS_FAILED", + "INDEX_JOB_STATUS_NO_CHANGES", "INDEX_JOB_STATUS_PENDING", and + "INDEX_JOB_STATUS_CANCELLED". + "tokens": 0, # Optional. Number of tokens [This field is + deprecated]. + "total_datasources": 0, # Optional. Number of datasources being + indexed. + "total_tokens": "str", # Optional. Total Tokens Consumed By the + Indexing Job. 
+ "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. + "uuid": "str" # Optional. Unique id. + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace_async + async def cancel_indexing_job( + self, uuid: str, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Cancel Indexing Job for a Knowledge Base. + + To cancel an indexing job for a knowledge base, send a PUT request to + ``/v2/gen-ai/indexing_jobs/{uuid}/cancel``. + + :param uuid: A unique identifier for an indexing job. Required. + :type uuid: str + :param body: Is either a JSON type or a IO[bytes] type. Default value is None. + :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "uuid": "str" # Optional. A unique identifier for an indexing job. + } + + # response body for status code(s): 200 + response == { + "job": { + "completed_datasources": 0, # Optional. Number of datasources + indexed completed. + "created_at": "2020-02-20 00:00:00", # Optional. Creation date / + time. + "data_source_jobs": [ + { + "completed_at": "2020-02-20 00:00:00", # Optional. + Timestamp when data source completed indexing. + "data_source_uuid": "str", # Optional. Uuid of the + indexed data source. 
+ "error_details": "str", # Optional. A detailed error + description. + "error_msg": "str", # Optional. A string code + provinding a hint which part of the system experienced an error. + "failed_item_count": "str", # Optional. Total count + of files that have failed. + "indexed_file_count": "str", # Optional. Total count + of files that have been indexed. + "indexed_item_count": "str", # Optional. Total count + of files that have been indexed. + "removed_item_count": "str", # Optional. Total count + of files that have been removed. + "skipped_item_count": "str", # Optional. Total count + of files that have been skipped. + "started_at": "2020-02-20 00:00:00", # Optional. + Timestamp when data source started indexing. + "status": "DATA_SOURCE_STATUS_UNKNOWN", # Optional. + Default value is "DATA_SOURCE_STATUS_UNKNOWN". Known values are: + "DATA_SOURCE_STATUS_UNKNOWN", "DATA_SOURCE_STATUS_IN_PROGRESS", + "DATA_SOURCE_STATUS_UPDATED", "DATA_SOURCE_STATUS_PARTIALLY_UPDATED", + "DATA_SOURCE_STATUS_NOT_UPDATED", "DATA_SOURCE_STATUS_FAILED", and + "DATA_SOURCE_STATUS_CANCELLED". + "total_bytes": "str", # Optional. Total size of + files in data source in bytes. + "total_bytes_indexed": "str", # Optional. Total size + of files in data source in bytes that have been indexed. + "total_file_count": "str" # Optional. Total file + count in the data source. + } + ], + "data_source_uuids": [ + "str" # Optional. IndexingJob description. + ], + "finished_at": "2020-02-20 00:00:00", # Optional. IndexingJob + description. + "is_report_available": bool, # Optional. Boolean value to determine + if the indexing job details are available. + "knowledge_base_uuid": "str", # Optional. Knowledge base id. + "phase": "BATCH_JOB_PHASE_UNKNOWN", # Optional. Default value is + "BATCH_JOB_PHASE_UNKNOWN". 
Known values are: "BATCH_JOB_PHASE_UNKNOWN", + "BATCH_JOB_PHASE_PENDING", "BATCH_JOB_PHASE_RUNNING", + "BATCH_JOB_PHASE_SUCCEEDED", "BATCH_JOB_PHASE_FAILED", + "BATCH_JOB_PHASE_ERROR", and "BATCH_JOB_PHASE_CANCELLED". + "started_at": "2020-02-20 00:00:00", # Optional. IndexingJob + description. + "status": "INDEX_JOB_STATUS_UNKNOWN", # Optional. Default value is + "INDEX_JOB_STATUS_UNKNOWN". Known values are: "INDEX_JOB_STATUS_UNKNOWN", + "INDEX_JOB_STATUS_PARTIAL", "INDEX_JOB_STATUS_IN_PROGRESS", + "INDEX_JOB_STATUS_COMPLETED", "INDEX_JOB_STATUS_FAILED", + "INDEX_JOB_STATUS_NO_CHANGES", "INDEX_JOB_STATUS_PENDING", and + "INDEX_JOB_STATUS_CANCELLED". + "tokens": 0, # Optional. Number of tokens [This field is + deprecated]. + "total_datasources": 0, # Optional. Number of datasources being + indexed. + "total_tokens": "str", # Optional. Total Tokens Consumed By the + Indexing Job. + "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. + "uuid": "str" # Optional. Unique id. + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[JSON] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + if body is not None: + _json = body + else: + _json = None + + _request = build_genai_cancel_indexing_job_request( + uuid=uuid, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + 
if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace_async + async def list_knowledge_bases( + self, + *, + page: Optional[int] = None, + per_page: Optional[int] = None, + **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """List Knowledge Bases. + + To list all knowledge bases, send a GET request to ``/v2/gen-ai/knowledge_bases``. + + :keyword page: Page number. Default value is None. + :paramtype page: int + :keyword per_page: Items per page. Default value is None. + :paramtype per_page: int + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "knowledge_bases": [ + { + "added_to_agent_at": "2020-02-20 00:00:00", # Optional. Time + when the knowledge base was added to the agent. + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "database_id": "str", # Optional. The knowledge bases. + "embedding_model_uuid": "str", # Optional. The knowledge + bases. + "is_public": bool, # Optional. Whether the knowledge base is + public or not. + "last_indexing_job": { + "completed_datasources": 0, # Optional. Number of + datasources indexed completed. + "created_at": "2020-02-20 00:00:00", # Optional. + Creation date / time. 
+ "data_source_jobs": [ + { + "completed_at": "2020-02-20 + 00:00:00", # Optional. Timestamp when data source completed + indexing. + "data_source_uuid": "str", # + Optional. Uuid of the indexed data source. + "error_details": "str", # Optional. + A detailed error description. + "error_msg": "str", # Optional. A + string code provinding a hint which part of the system + experienced an error. + "failed_item_count": "str", # + Optional. Total count of files that have failed. + "indexed_file_count": "str", # + Optional. Total count of files that have been indexed. + "indexed_item_count": "str", # + Optional. Total count of files that have been indexed. + "removed_item_count": "str", # + Optional. Total count of files that have been removed. + "skipped_item_count": "str", # + Optional. Total count of files that have been skipped. + "started_at": "2020-02-20 00:00:00", + # Optional. Timestamp when data source started indexing. + "status": + "DATA_SOURCE_STATUS_UNKNOWN", # Optional. Default value is + "DATA_SOURCE_STATUS_UNKNOWN". Known values are: + "DATA_SOURCE_STATUS_UNKNOWN", + "DATA_SOURCE_STATUS_IN_PROGRESS", + "DATA_SOURCE_STATUS_UPDATED", + "DATA_SOURCE_STATUS_PARTIALLY_UPDATED", + "DATA_SOURCE_STATUS_NOT_UPDATED", + "DATA_SOURCE_STATUS_FAILED", and + "DATA_SOURCE_STATUS_CANCELLED". + "total_bytes": "str", # Optional. + Total size of files in data source in bytes. + "total_bytes_indexed": "str", # + Optional. Total size of files in data source in bytes that + have been indexed. + "total_file_count": "str" # + Optional. Total file count in the data source. + } + ], + "data_source_uuids": [ + "str" # Optional. IndexingJob description. + ], + "finished_at": "2020-02-20 00:00:00", # Optional. + IndexingJob description. + "is_report_available": bool, # Optional. Boolean + value to determine if the indexing job details are available. + "knowledge_base_uuid": "str", # Optional. Knowledge + base id. + "phase": "BATCH_JOB_PHASE_UNKNOWN", # Optional. 
+ Default value is "BATCH_JOB_PHASE_UNKNOWN". Known values are: + "BATCH_JOB_PHASE_UNKNOWN", "BATCH_JOB_PHASE_PENDING", + "BATCH_JOB_PHASE_RUNNING", "BATCH_JOB_PHASE_SUCCEEDED", + "BATCH_JOB_PHASE_FAILED", "BATCH_JOB_PHASE_ERROR", and + "BATCH_JOB_PHASE_CANCELLED". + "started_at": "2020-02-20 00:00:00", # Optional. + IndexingJob description. + "status": "INDEX_JOB_STATUS_UNKNOWN", # Optional. + Default value is "INDEX_JOB_STATUS_UNKNOWN". Known values are: + "INDEX_JOB_STATUS_UNKNOWN", "INDEX_JOB_STATUS_PARTIAL", + "INDEX_JOB_STATUS_IN_PROGRESS", "INDEX_JOB_STATUS_COMPLETED", + "INDEX_JOB_STATUS_FAILED", "INDEX_JOB_STATUS_NO_CHANGES", + "INDEX_JOB_STATUS_PENDING", and "INDEX_JOB_STATUS_CANCELLED". + "tokens": 0, # Optional. Number of tokens [This + field is deprecated]. + "total_datasources": 0, # Optional. Number of + datasources being indexed. + "total_tokens": "str", # Optional. Total Tokens + Consumed By the Indexing Job. + "updated_at": "2020-02-20 00:00:00", # Optional. + Last modified. + "uuid": "str" # Optional. Unique id. + }, + "name": "str", # Optional. Name of knowledge base. + "project_id": "str", # Optional. The knowledge bases. + "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether reranking is + enabled for retrieval. + "model": "str" # Optional. Reranker model internal + name. + }, + "tags": [ + "str" # Optional. Tags to organize related + resources. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "user_id": "str", # Optional. Id of user that created the + knowledge base. + "uuid": "str" # Optional. Unique id for knowledge base. + } + ], + "links": { + "pages": { + "first": "str", # Optional. First page. + "last": "str", # Optional. Last page. + "next": "str", # Optional. Next page. + "previous": "str" # Optional. Previous page. + } + }, + "meta": { + "page": 0, # Optional. The current page. + "pages": 0, # Optional. Total number of pages. + "total": 0 # Optional. 
Total amount of items over all pages. + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_genai_list_knowledge_bases_request( + page=page, + per_page=per_page, headers=_headers, params=_params, ) @@ -233747,18 +245426,21 @@ async def create_knowledge_base( data_source_details. "bucket_region": "str", # Optional. Deprecated, moved to data_source_details. - "chunking_algorithm": "CHUNKING_ALGORITHM_SECTION_BASED", # - Optional. Default value is "CHUNKING_ALGORITHM_SECTION_BASED". The - chunking algorithm to use for processing data sources. **Note: This - feature requires enabling the knowledgebase enhancements feature preview - flag.**. Known values are: "CHUNKING_ALGORITHM_UNKNOWN", - "CHUNKING_ALGORITHM_SECTION_BASED", "CHUNKING_ALGORITHM_HIERARCHICAL", - "CHUNKING_ALGORITHM_SEMANTIC", and "CHUNKING_ALGORITHM_FIXED_LENGTH". + "chunking_algorithm": "CHUNKING_ALGORITHM_UNKNOWN", # + Optional. 
Default value is "CHUNKING_ALGORITHM_UNKNOWN". Known values + are: "CHUNKING_ALGORITHM_UNKNOWN", "CHUNKING_ALGORITHM_SECTION_BASED", + "CHUNKING_ALGORITHM_HIERARCHICAL", "CHUNKING_ALGORITHM_SEMANTIC", and + "CHUNKING_ALGORITHM_FIXED_LENGTH". "chunking_options": { - "child_chunk_size": 0, # Optional. Hierarchical - options. - "max_chunk_size": 0, # Optional. Section_Based and - Fixed_Length options. + "child_chunk_size": 0, # Optional. Optional data + sources to attach at creation. Omit or use an empty list to create + the knowledge base without sources, then add sources (with chunking + strategy and sizes) using `Add a Data Source to a Knowledge Base + <#operation/create_knowledge_base_data_source>`_. When provided, see + `Organize Data Sources + `_ + for best practices. + "max_chunk_size": 0, # Optional. Common options. "parent_chunk_size": 0, # Optional. Hierarchical options. "semantic_threshold": 0.0 # Optional. Semantic @@ -233785,10 +245467,14 @@ async def create_knowledge_base( you can obrain a refresh token by following the oauth2 flow. see /v2/gen-ai/oauth2/google/tokens for reference. }, - "item_path": "str", # Optional. The data sources to use for - this knowledge base. See `Organize Data Sources - `_ - for more information on data sources best practices. + "item_path": "str", # Optional. Optional data sources to + attach at creation. Omit or use an empty list to create the knowledge + base without sources, then add sources (with chunking strategy and sizes) + using `Add a Data Source to a Knowledge Base + <#operation/create_knowledge_base_data_source>`_. When provided, see + `Organize Data Sources + `_ + for best practices. "spaces_data_source": { "bucket_name": "str", # Optional. Spaces bucket name. @@ -233825,6 +245511,16 @@ async def create_knowledge_base( knowledge base will belong to. "region": "str", # Optional. The datacenter region to deploy the knowledge base in. + "reranking_config": { + "enabled": bool, # Optional. 
Whether reranking is enabled for + retrieval. + "model": "str" # Optional. Reranker model internal name. + }, + "size": "OPEN_SEARCH_PLAN_SIZE_UNSPECIFIED", # Optional. Default value is + "OPEN_SEARCH_PLAN_SIZE_UNSPECIFIED". Known values are: + "OPEN_SEARCH_PLAN_SIZE_UNSPECIFIED", "OPEN_SEARCH_PLAN_SIZE_SMALL", + "OPEN_SEARCH_PLAN_SIZE_MEDIUM", "OPEN_SEARCH_PLAN_SIZE_LARGE", and + "OPEN_SEARCH_PLAN_SIZE_EXTRA_LARGE". "tags": [ "str" # Optional. Tags to organize your knowledge base. ], @@ -233923,6 +245619,11 @@ async def create_knowledge_base( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledgebase Description. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether reranking is enabled + for retrieval. + "model": "str" # Optional. Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. ], @@ -234061,6 +245762,11 @@ async def create_knowledge_base( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledgebase Description. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether reranking is enabled + for retrieval. + "model": "str" # Optional. Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. ], @@ -234120,18 +245826,21 @@ async def create_knowledge_base( data_source_details. "bucket_region": "str", # Optional. Deprecated, moved to data_source_details. - "chunking_algorithm": "CHUNKING_ALGORITHM_SECTION_BASED", # - Optional. Default value is "CHUNKING_ALGORITHM_SECTION_BASED". The - chunking algorithm to use for processing data sources. **Note: This - feature requires enabling the knowledgebase enhancements feature preview - flag.**. 
Known values are: "CHUNKING_ALGORITHM_UNKNOWN", - "CHUNKING_ALGORITHM_SECTION_BASED", "CHUNKING_ALGORITHM_HIERARCHICAL", - "CHUNKING_ALGORITHM_SEMANTIC", and "CHUNKING_ALGORITHM_FIXED_LENGTH". + "chunking_algorithm": "CHUNKING_ALGORITHM_UNKNOWN", # + Optional. Default value is "CHUNKING_ALGORITHM_UNKNOWN". Known values + are: "CHUNKING_ALGORITHM_UNKNOWN", "CHUNKING_ALGORITHM_SECTION_BASED", + "CHUNKING_ALGORITHM_HIERARCHICAL", "CHUNKING_ALGORITHM_SEMANTIC", and + "CHUNKING_ALGORITHM_FIXED_LENGTH". "chunking_options": { - "child_chunk_size": 0, # Optional. Hierarchical - options. - "max_chunk_size": 0, # Optional. Section_Based and - Fixed_Length options. + "child_chunk_size": 0, # Optional. Optional data + sources to attach at creation. Omit or use an empty list to create + the knowledge base without sources, then add sources (with chunking + strategy and sizes) using `Add a Data Source to a Knowledge Base + <#operation/create_knowledge_base_data_source>`_. When provided, see + `Organize Data Sources + `_ + for best practices. + "max_chunk_size": 0, # Optional. Common options. "parent_chunk_size": 0, # Optional. Hierarchical options. "semantic_threshold": 0.0 # Optional. Semantic @@ -234158,10 +245867,14 @@ async def create_knowledge_base( you can obrain a refresh token by following the oauth2 flow. see /v2/gen-ai/oauth2/google/tokens for reference. }, - "item_path": "str", # Optional. The data sources to use for - this knowledge base. See `Organize Data Sources - `_ - for more information on data sources best practices. + "item_path": "str", # Optional. Optional data sources to + attach at creation. Omit or use an empty list to create the knowledge + base without sources, then add sources (with chunking strategy and sizes) + using `Add a Data Source to a Knowledge Base + <#operation/create_knowledge_base_data_source>`_. When provided, see + `Organize Data Sources + `_ + for best practices. "spaces_data_source": { "bucket_name": "str", # Optional. 
Spaces bucket name. @@ -234198,6 +245911,16 @@ async def create_knowledge_base( knowledge base will belong to. "region": "str", # Optional. The datacenter region to deploy the knowledge base in. + "reranking_config": { + "enabled": bool, # Optional. Whether reranking is enabled for + retrieval. + "model": "str" # Optional. Reranker model internal name. + }, + "size": "OPEN_SEARCH_PLAN_SIZE_UNSPECIFIED", # Optional. Default value is + "OPEN_SEARCH_PLAN_SIZE_UNSPECIFIED". Known values are: + "OPEN_SEARCH_PLAN_SIZE_UNSPECIFIED", "OPEN_SEARCH_PLAN_SIZE_SMALL", + "OPEN_SEARCH_PLAN_SIZE_MEDIUM", "OPEN_SEARCH_PLAN_SIZE_LARGE", and + "OPEN_SEARCH_PLAN_SIZE_EXTRA_LARGE". "tags": [ "str" # Optional. Tags to organize your knowledge base. ], @@ -234296,6 +246019,11 @@ async def create_knowledge_base( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledgebase Description. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether reranking is enabled + for retrieval. + "model": "str" # Optional. Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. ], @@ -234728,18 +246456,14 @@ async def list_knowledge_base_data_sources( }, "bucket_name": "str", # Optional. Name of storage bucket - Deprecated, moved to data_source_details. - "chunking_algorithm": "CHUNKING_ALGORITHM_SECTION_BASED", # - Optional. Default value is "CHUNKING_ALGORITHM_SECTION_BASED". The - chunking algorithm to use for processing data sources. **Note: This - feature requires enabling the knowledgebase enhancements feature preview - flag.**. Known values are: "CHUNKING_ALGORITHM_UNKNOWN", - "CHUNKING_ALGORITHM_SECTION_BASED", "CHUNKING_ALGORITHM_HIERARCHICAL", - "CHUNKING_ALGORITHM_SEMANTIC", and "CHUNKING_ALGORITHM_FIXED_LENGTH". + "chunking_algorithm": "CHUNKING_ALGORITHM_UNKNOWN", # + Optional. Default value is "CHUNKING_ALGORITHM_UNKNOWN". 
Known values + are: "CHUNKING_ALGORITHM_UNKNOWN", "CHUNKING_ALGORITHM_SECTION_BASED", + "CHUNKING_ALGORITHM_HIERARCHICAL", "CHUNKING_ALGORITHM_SEMANTIC", and + "CHUNKING_ALGORITHM_FIXED_LENGTH". "chunking_options": { - "child_chunk_size": 0, # Optional. Hierarchical - options. - "max_chunk_size": 0, # Optional. Section_Based and - Fixed_Length options. + "child_chunk_size": 0, # Optional. The data sources. + "max_chunk_size": 0, # Optional. Common options. "parent_chunk_size": 0, # Optional. Hierarchical options. "semantic_threshold": 0.0 # Optional. Semantic @@ -234978,17 +246702,14 @@ async def create_knowledge_base_data_source( "region": "str", # Optional. Region of bucket. "secret_key": "str" # Optional. The AWS Secret Key. }, - "chunking_algorithm": "CHUNKING_ALGORITHM_SECTION_BASED", # Optional. - Default value is "CHUNKING_ALGORITHM_SECTION_BASED". The chunking algorithm to - use for processing data sources. **Note: This feature requires enabling the - knowledgebase enhancements feature preview flag.**. Known values are: + "chunking_algorithm": "CHUNKING_ALGORITHM_UNKNOWN", # Optional. Default + value is "CHUNKING_ALGORITHM_UNKNOWN". Known values are: "CHUNKING_ALGORITHM_UNKNOWN", "CHUNKING_ALGORITHM_SECTION_BASED", "CHUNKING_ALGORITHM_HIERARCHICAL", "CHUNKING_ALGORITHM_SEMANTIC", and "CHUNKING_ALGORITHM_FIXED_LENGTH". "chunking_options": { - "child_chunk_size": 0, # Optional. Hierarchical options. - "max_chunk_size": 0, # Optional. Section_Based and Fixed_Length - options. + "child_chunk_size": 0, # Optional. + "max_chunk_size": 0, # Optional. Common options. "parent_chunk_size": 0, # Optional. Hierarchical options. "semantic_threshold": 0.0 # Optional. Semantic options. }, @@ -235028,17 +246749,15 @@ async def create_knowledge_base_data_source( }, "bucket_name": "str", # Optional. Name of storage bucket - Deprecated, moved to data_source_details. - "chunking_algorithm": "CHUNKING_ALGORITHM_SECTION_BASED", # - Optional. 
Default value is "CHUNKING_ALGORITHM_SECTION_BASED". The chunking - algorithm to use for processing data sources. **Note: This feature requires - enabling the knowledgebase enhancements feature preview flag.**. Known values - are: "CHUNKING_ALGORITHM_UNKNOWN", "CHUNKING_ALGORITHM_SECTION_BASED", + "chunking_algorithm": "CHUNKING_ALGORITHM_UNKNOWN", # Optional. + Default value is "CHUNKING_ALGORITHM_UNKNOWN". Known values are: + "CHUNKING_ALGORITHM_UNKNOWN", "CHUNKING_ALGORITHM_SECTION_BASED", "CHUNKING_ALGORITHM_HIERARCHICAL", "CHUNKING_ALGORITHM_SEMANTIC", and "CHUNKING_ALGORITHM_FIXED_LENGTH". "chunking_options": { - "child_chunk_size": 0, # Optional. Hierarchical options. - "max_chunk_size": 0, # Optional. Section_Based and - Fixed_Length options. + "child_chunk_size": 0, # Optional. Data Source configuration + for Knowledge Bases. + "max_chunk_size": 0, # Optional. Common options. "parent_chunk_size": 0, # Optional. Hierarchical options. "semantic_threshold": 0.0 # Optional. Semantic options. }, @@ -235178,17 +246897,15 @@ async def create_knowledge_base_data_source( }, "bucket_name": "str", # Optional. Name of storage bucket - Deprecated, moved to data_source_details. - "chunking_algorithm": "CHUNKING_ALGORITHM_SECTION_BASED", # - Optional. Default value is "CHUNKING_ALGORITHM_SECTION_BASED". The chunking - algorithm to use for processing data sources. **Note: This feature requires - enabling the knowledgebase enhancements feature preview flag.**. Known values - are: "CHUNKING_ALGORITHM_UNKNOWN", "CHUNKING_ALGORITHM_SECTION_BASED", + "chunking_algorithm": "CHUNKING_ALGORITHM_UNKNOWN", # Optional. + Default value is "CHUNKING_ALGORITHM_UNKNOWN". Known values are: + "CHUNKING_ALGORITHM_UNKNOWN", "CHUNKING_ALGORITHM_SECTION_BASED", "CHUNKING_ALGORITHM_HIERARCHICAL", "CHUNKING_ALGORITHM_SEMANTIC", and "CHUNKING_ALGORITHM_FIXED_LENGTH". "chunking_options": { - "child_chunk_size": 0, # Optional. Hierarchical options. - "max_chunk_size": 0, # Optional. 
Section_Based and - Fixed_Length options. + "child_chunk_size": 0, # Optional. Data Source configuration + for Knowledge Bases. + "max_chunk_size": 0, # Optional. Common options. "parent_chunk_size": 0, # Optional. Hierarchical options. "semantic_threshold": 0.0 # Optional. Semantic options. }, @@ -235321,17 +247038,14 @@ async def create_knowledge_base_data_source( "region": "str", # Optional. Region of bucket. "secret_key": "str" # Optional. The AWS Secret Key. }, - "chunking_algorithm": "CHUNKING_ALGORITHM_SECTION_BASED", # Optional. - Default value is "CHUNKING_ALGORITHM_SECTION_BASED". The chunking algorithm to - use for processing data sources. **Note: This feature requires enabling the - knowledgebase enhancements feature preview flag.**. Known values are: + "chunking_algorithm": "CHUNKING_ALGORITHM_UNKNOWN", # Optional. Default + value is "CHUNKING_ALGORITHM_UNKNOWN". Known values are: "CHUNKING_ALGORITHM_UNKNOWN", "CHUNKING_ALGORITHM_SECTION_BASED", "CHUNKING_ALGORITHM_HIERARCHICAL", "CHUNKING_ALGORITHM_SEMANTIC", and "CHUNKING_ALGORITHM_FIXED_LENGTH". "chunking_options": { - "child_chunk_size": 0, # Optional. Hierarchical options. - "max_chunk_size": 0, # Optional. Section_Based and Fixed_Length - options. + "child_chunk_size": 0, # Optional. + "max_chunk_size": 0, # Optional. Common options. "parent_chunk_size": 0, # Optional. Hierarchical options. "semantic_threshold": 0.0 # Optional. Semantic options. }, @@ -235371,17 +247085,15 @@ async def create_knowledge_base_data_source( }, "bucket_name": "str", # Optional. Name of storage bucket - Deprecated, moved to data_source_details. - "chunking_algorithm": "CHUNKING_ALGORITHM_SECTION_BASED", # - Optional. Default value is "CHUNKING_ALGORITHM_SECTION_BASED". The chunking - algorithm to use for processing data sources. **Note: This feature requires - enabling the knowledgebase enhancements feature preview flag.**. 
Known values - are: "CHUNKING_ALGORITHM_UNKNOWN", "CHUNKING_ALGORITHM_SECTION_BASED", + "chunking_algorithm": "CHUNKING_ALGORITHM_UNKNOWN", # Optional. + Default value is "CHUNKING_ALGORITHM_UNKNOWN". Known values are: + "CHUNKING_ALGORITHM_UNKNOWN", "CHUNKING_ALGORITHM_SECTION_BASED", "CHUNKING_ALGORITHM_HIERARCHICAL", "CHUNKING_ALGORITHM_SEMANTIC", and "CHUNKING_ALGORITHM_FIXED_LENGTH". "chunking_options": { - "child_chunk_size": 0, # Optional. Hierarchical options. - "max_chunk_size": 0, # Optional. Section_Based and - Fixed_Length options. + "child_chunk_size": 0, # Optional. Data Source configuration + for Knowledge Bases. + "max_chunk_size": 0, # Optional. Common options. "parent_chunk_size": 0, # Optional. Hierarchical options. "semantic_threshold": 0.0 # Optional. Semantic options. }, @@ -235609,17 +247321,14 @@ async def update_knowledge_base_data_source( # JSON input template you can fill out and use as your body input. body = { - "chunking_algorithm": "CHUNKING_ALGORITHM_SECTION_BASED", # Optional. - Default value is "CHUNKING_ALGORITHM_SECTION_BASED". The chunking algorithm to - use for processing data sources. **Note: This feature requires enabling the - knowledgebase enhancements feature preview flag.**. Known values are: + "chunking_algorithm": "CHUNKING_ALGORITHM_UNKNOWN", # Optional. Default + value is "CHUNKING_ALGORITHM_UNKNOWN". Known values are: "CHUNKING_ALGORITHM_UNKNOWN", "CHUNKING_ALGORITHM_SECTION_BASED", "CHUNKING_ALGORITHM_HIERARCHICAL", "CHUNKING_ALGORITHM_SEMANTIC", and "CHUNKING_ALGORITHM_FIXED_LENGTH". "chunking_options": { - "child_chunk_size": 0, # Optional. Hierarchical options. - "max_chunk_size": 0, # Optional. Section_Based and Fixed_Length - options. + "child_chunk_size": 0, # Optional. + "max_chunk_size": 0, # Optional. Common options. "parent_chunk_size": 0, # Optional. Hierarchical options. "semantic_threshold": 0.0 # Optional. Semantic options. 
}, @@ -235638,17 +247347,15 @@ async def update_knowledge_base_data_source( }, "bucket_name": "str", # Optional. Name of storage bucket - Deprecated, moved to data_source_details. - "chunking_algorithm": "CHUNKING_ALGORITHM_SECTION_BASED", # - Optional. Default value is "CHUNKING_ALGORITHM_SECTION_BASED". The chunking - algorithm to use for processing data sources. **Note: This feature requires - enabling the knowledgebase enhancements feature preview flag.**. Known values - are: "CHUNKING_ALGORITHM_UNKNOWN", "CHUNKING_ALGORITHM_SECTION_BASED", + "chunking_algorithm": "CHUNKING_ALGORITHM_UNKNOWN", # Optional. + Default value is "CHUNKING_ALGORITHM_UNKNOWN". Known values are: + "CHUNKING_ALGORITHM_UNKNOWN", "CHUNKING_ALGORITHM_SECTION_BASED", "CHUNKING_ALGORITHM_HIERARCHICAL", "CHUNKING_ALGORITHM_SEMANTIC", and "CHUNKING_ALGORITHM_FIXED_LENGTH". "chunking_options": { - "child_chunk_size": 0, # Optional. Hierarchical options. - "max_chunk_size": 0, # Optional. Section_Based and - Fixed_Length options. + "child_chunk_size": 0, # Optional. Data Source configuration + for Knowledge Bases. + "max_chunk_size": 0, # Optional. Common options. "parent_chunk_size": 0, # Optional. Hierarchical options. "semantic_threshold": 0.0 # Optional. Semantic options. }, @@ -235791,17 +247498,15 @@ async def update_knowledge_base_data_source( }, "bucket_name": "str", # Optional. Name of storage bucket - Deprecated, moved to data_source_details. - "chunking_algorithm": "CHUNKING_ALGORITHM_SECTION_BASED", # - Optional. Default value is "CHUNKING_ALGORITHM_SECTION_BASED". The chunking - algorithm to use for processing data sources. **Note: This feature requires - enabling the knowledgebase enhancements feature preview flag.**. Known values - are: "CHUNKING_ALGORITHM_UNKNOWN", "CHUNKING_ALGORITHM_SECTION_BASED", + "chunking_algorithm": "CHUNKING_ALGORITHM_UNKNOWN", # Optional. + Default value is "CHUNKING_ALGORITHM_UNKNOWN". 
Known values are: + "CHUNKING_ALGORITHM_UNKNOWN", "CHUNKING_ALGORITHM_SECTION_BASED", "CHUNKING_ALGORITHM_HIERARCHICAL", "CHUNKING_ALGORITHM_SEMANTIC", and "CHUNKING_ALGORITHM_FIXED_LENGTH". "chunking_options": { - "child_chunk_size": 0, # Optional. Hierarchical options. - "max_chunk_size": 0, # Optional. Section_Based and - Fixed_Length options. + "child_chunk_size": 0, # Optional. Data Source configuration + for Knowledge Bases. + "max_chunk_size": 0, # Optional. Common options. "parent_chunk_size": 0, # Optional. Hierarchical options. "semantic_threshold": 0.0 # Optional. Semantic options. }, @@ -235930,17 +247635,14 @@ async def update_knowledge_base_data_source( # JSON input template you can fill out and use as your body input. body = { - "chunking_algorithm": "CHUNKING_ALGORITHM_SECTION_BASED", # Optional. - Default value is "CHUNKING_ALGORITHM_SECTION_BASED". The chunking algorithm to - use for processing data sources. **Note: This feature requires enabling the - knowledgebase enhancements feature preview flag.**. Known values are: + "chunking_algorithm": "CHUNKING_ALGORITHM_UNKNOWN", # Optional. Default + value is "CHUNKING_ALGORITHM_UNKNOWN". Known values are: "CHUNKING_ALGORITHM_UNKNOWN", "CHUNKING_ALGORITHM_SECTION_BASED", "CHUNKING_ALGORITHM_HIERARCHICAL", "CHUNKING_ALGORITHM_SEMANTIC", and "CHUNKING_ALGORITHM_FIXED_LENGTH". "chunking_options": { - "child_chunk_size": 0, # Optional. Hierarchical options. - "max_chunk_size": 0, # Optional. Section_Based and Fixed_Length - options. + "child_chunk_size": 0, # Optional. + "max_chunk_size": 0, # Optional. Common options. "parent_chunk_size": 0, # Optional. Hierarchical options. "semantic_threshold": 0.0 # Optional. Semantic options. }, @@ -235959,17 +247661,15 @@ async def update_knowledge_base_data_source( }, "bucket_name": "str", # Optional. Name of storage bucket - Deprecated, moved to data_source_details. - "chunking_algorithm": "CHUNKING_ALGORITHM_SECTION_BASED", # - Optional. 
Default value is "CHUNKING_ALGORITHM_SECTION_BASED". The chunking - algorithm to use for processing data sources. **Note: This feature requires - enabling the knowledgebase enhancements feature preview flag.**. Known values - are: "CHUNKING_ALGORITHM_UNKNOWN", "CHUNKING_ALGORITHM_SECTION_BASED", + "chunking_algorithm": "CHUNKING_ALGORITHM_UNKNOWN", # Optional. + Default value is "CHUNKING_ALGORITHM_UNKNOWN". Known values are: + "CHUNKING_ALGORITHM_UNKNOWN", "CHUNKING_ALGORITHM_SECTION_BASED", "CHUNKING_ALGORITHM_HIERARCHICAL", "CHUNKING_ALGORITHM_SEMANTIC", and "CHUNKING_ALGORITHM_FIXED_LENGTH". "chunking_options": { - "child_chunk_size": 0, # Optional. Hierarchical options. - "max_chunk_size": 0, # Optional. Section_Based and - Fixed_Length options. + "child_chunk_size": 0, # Optional. Data Source configuration + for Knowledge Bases. + "max_chunk_size": 0, # Optional. Common options. "parent_chunk_size": 0, # Optional. Hierarchical options. "semantic_threshold": 0.0 # Optional. Semantic options. 
}, @@ -236081,31 +247781,1237 @@ async def update_knowledge_base_data_source( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[JSON] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + if body is not None: + _json = body + else: + _json = None + + _request = build_genai_update_knowledge_base_data_source_request( + knowledge_base_uuid=knowledge_base_uuid, + data_source_uuid=data_source_uuid, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if 
response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace_async + async def delete_knowledge_base_data_source( + self, knowledge_base_uuid: str, data_source_uuid: str, **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Delete a Data Source from a Knowledge Base. + + To delete a data source from a knowledge base, send a DELETE request to + ``/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}``. + + :param knowledge_base_uuid: Knowledge base id. Required. + :type knowledge_base_uuid: str + :param data_source_uuid: Data source id. Required. + :type data_source_uuid: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "data_source_uuid": "str", # Optional. Data source id. + "knowledge_base_uuid": "str" # Optional. Knowledge base id. + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. 
Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_genai_delete_knowledge_base_data_source_request( + knowledge_base_uuid=knowledge_base_uuid, + data_source_uuid=data_source_uuid, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = 
self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace_async + async def list_indexing_jobs_by_knowledge_base( + self, knowledge_base_uuid: str, **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """List Indexing Jobs for a Knowledge Base. + + To list latest 15 indexing jobs for a knowledge base, send a GET request to + ``/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/indexing_jobs``. + + :param knowledge_base_uuid: Knowledge base uuid in string. Required. + :type knowledge_base_uuid: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "jobs": [ + { + "completed_datasources": 0, # Optional. Number of + datasources indexed completed. + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "data_source_jobs": [ + { + "completed_at": "2020-02-20 00:00:00", # + Optional. Timestamp when data source completed indexing. + "data_source_uuid": "str", # Optional. Uuid + of the indexed data source. + "error_details": "str", # Optional. A + detailed error description. + "error_msg": "str", # Optional. A string + code provinding a hint which part of the system experienced an + error. + "failed_item_count": "str", # Optional. + Total count of files that have failed. + "indexed_file_count": "str", # Optional. + Total count of files that have been indexed. + "indexed_item_count": "str", # Optional. 
+ Total count of files that have been indexed. + "removed_item_count": "str", # Optional. + Total count of files that have been removed. + "skipped_item_count": "str", # Optional. + Total count of files that have been skipped. + "started_at": "2020-02-20 00:00:00", # + Optional. Timestamp when data source started indexing. + "status": "DATA_SOURCE_STATUS_UNKNOWN", # + Optional. Default value is "DATA_SOURCE_STATUS_UNKNOWN". Known + values are: "DATA_SOURCE_STATUS_UNKNOWN", + "DATA_SOURCE_STATUS_IN_PROGRESS", "DATA_SOURCE_STATUS_UPDATED", + "DATA_SOURCE_STATUS_PARTIALLY_UPDATED", + "DATA_SOURCE_STATUS_NOT_UPDATED", "DATA_SOURCE_STATUS_FAILED", + and "DATA_SOURCE_STATUS_CANCELLED". + "total_bytes": "str", # Optional. Total size + of files in data source in bytes. + "total_bytes_indexed": "str", # Optional. + Total size of files in data source in bytes that have been + indexed. + "total_file_count": "str" # Optional. Total + file count in the data source. + } + ], + "data_source_uuids": [ + "str" # Optional. The indexing jobs. + ], + "finished_at": "2020-02-20 00:00:00", # Optional. The + indexing jobs. + "is_report_available": bool, # Optional. Boolean value to + determine if the indexing job details are available. + "knowledge_base_uuid": "str", # Optional. Knowledge base id. + "phase": "BATCH_JOB_PHASE_UNKNOWN", # Optional. Default + value is "BATCH_JOB_PHASE_UNKNOWN". Known values are: + "BATCH_JOB_PHASE_UNKNOWN", "BATCH_JOB_PHASE_PENDING", + "BATCH_JOB_PHASE_RUNNING", "BATCH_JOB_PHASE_SUCCEEDED", + "BATCH_JOB_PHASE_FAILED", "BATCH_JOB_PHASE_ERROR", and + "BATCH_JOB_PHASE_CANCELLED". + "started_at": "2020-02-20 00:00:00", # Optional. The + indexing jobs. + "status": "INDEX_JOB_STATUS_UNKNOWN", # Optional. Default + value is "INDEX_JOB_STATUS_UNKNOWN". 
Known values are: + "INDEX_JOB_STATUS_UNKNOWN", "INDEX_JOB_STATUS_PARTIAL", + "INDEX_JOB_STATUS_IN_PROGRESS", "INDEX_JOB_STATUS_COMPLETED", + "INDEX_JOB_STATUS_FAILED", "INDEX_JOB_STATUS_NO_CHANGES", + "INDEX_JOB_STATUS_PENDING", and "INDEX_JOB_STATUS_CANCELLED". + "tokens": 0, # Optional. Number of tokens [This field is + deprecated]. + "total_datasources": 0, # Optional. Number of datasources + being indexed. + "total_tokens": "str", # Optional. Total Tokens Consumed By + the Indexing Job. + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + } + ], + "links": { + "pages": { + "first": "str", # Optional. First page. + "last": "str", # Optional. Last page. + "next": "str", # Optional. Next page. + "previous": "str" # Optional. Previous page. + } + }, + "meta": { + "page": 0, # Optional. The current page. + "pages": 0, # Optional. Total number of pages. + "total": 0 # Optional. Total amount of items over all pages. + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_genai_list_indexing_jobs_by_knowledge_base_request( + knowledge_base_uuid=knowledge_base_uuid, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + 
response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace_async + async def get_knowledge_base(self, uuid: str, **kwargs: Any) -> JSON: + # pylint: disable=line-too-long + """Retrieve Information About an Existing Knowledge Base. + + To retrive information about an existing knowledge base, send a GET request to + ``/v2/gen-ai/knowledge_bases/{uuid}``. + + :param uuid: Knowledge base id. Required. + :type uuid: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "database_status": "CREATING", # Optional. Default value is "CREATING". + Known values are: "CREATING", "ONLINE", "POWEROFF", "REBUILDING", "REBALANCING", + "DECOMMISSIONED", "FORKING", "MIGRATING", "RESIZING", "RESTORING", "POWERING_ON", + "UNHEALTHY", and "UPGRADING". + "knowledge_base": { + "added_to_agent_at": "2020-02-20 00:00:00", # Optional. Time when + the knowledge base was added to the agent. + "created_at": "2020-02-20 00:00:00", # Optional. Creation date / + time. + "database_id": "str", # Optional. Knowledgebase Description. + "embedding_model_uuid": "str", # Optional. Knowledgebase + Description. + "is_public": bool, # Optional. Whether the knowledge base is public + or not. + "last_indexing_job": { + "completed_datasources": 0, # Optional. Number of + datasources indexed completed. + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "data_source_jobs": [ + { + "completed_at": "2020-02-20 00:00:00", # + Optional. Timestamp when data source completed indexing. + "data_source_uuid": "str", # Optional. 
Uuid + of the indexed data source. + "error_details": "str", # Optional. A + detailed error description. + "error_msg": "str", # Optional. A string + code provinding a hint which part of the system experienced an + error. + "failed_item_count": "str", # Optional. + Total count of files that have failed. + "indexed_file_count": "str", # Optional. + Total count of files that have been indexed. + "indexed_item_count": "str", # Optional. + Total count of files that have been indexed. + "removed_item_count": "str", # Optional. + Total count of files that have been removed. + "skipped_item_count": "str", # Optional. + Total count of files that have been skipped. + "started_at": "2020-02-20 00:00:00", # + Optional. Timestamp when data source started indexing. + "status": "DATA_SOURCE_STATUS_UNKNOWN", # + Optional. Default value is "DATA_SOURCE_STATUS_UNKNOWN". Known + values are: "DATA_SOURCE_STATUS_UNKNOWN", + "DATA_SOURCE_STATUS_IN_PROGRESS", "DATA_SOURCE_STATUS_UPDATED", + "DATA_SOURCE_STATUS_PARTIALLY_UPDATED", + "DATA_SOURCE_STATUS_NOT_UPDATED", "DATA_SOURCE_STATUS_FAILED", + and "DATA_SOURCE_STATUS_CANCELLED". + "total_bytes": "str", # Optional. Total size + of files in data source in bytes. + "total_bytes_indexed": "str", # Optional. + Total size of files in data source in bytes that have been + indexed. + "total_file_count": "str" # Optional. Total + file count in the data source. + } + ], + "data_source_uuids": [ + "str" # Optional. IndexingJob description. + ], + "finished_at": "2020-02-20 00:00:00", # Optional. + IndexingJob description. + "is_report_available": bool, # Optional. Boolean value to + determine if the indexing job details are available. + "knowledge_base_uuid": "str", # Optional. Knowledge base id. + "phase": "BATCH_JOB_PHASE_UNKNOWN", # Optional. Default + value is "BATCH_JOB_PHASE_UNKNOWN". 
Known values are: + "BATCH_JOB_PHASE_UNKNOWN", "BATCH_JOB_PHASE_PENDING", + "BATCH_JOB_PHASE_RUNNING", "BATCH_JOB_PHASE_SUCCEEDED", + "BATCH_JOB_PHASE_FAILED", "BATCH_JOB_PHASE_ERROR", and + "BATCH_JOB_PHASE_CANCELLED". + "started_at": "2020-02-20 00:00:00", # Optional. IndexingJob + description. + "status": "INDEX_JOB_STATUS_UNKNOWN", # Optional. Default + value is "INDEX_JOB_STATUS_UNKNOWN". Known values are: + "INDEX_JOB_STATUS_UNKNOWN", "INDEX_JOB_STATUS_PARTIAL", + "INDEX_JOB_STATUS_IN_PROGRESS", "INDEX_JOB_STATUS_COMPLETED", + "INDEX_JOB_STATUS_FAILED", "INDEX_JOB_STATUS_NO_CHANGES", + "INDEX_JOB_STATUS_PENDING", and "INDEX_JOB_STATUS_CANCELLED". + "tokens": 0, # Optional. Number of tokens [This field is + deprecated]. + "total_datasources": 0, # Optional. Number of datasources + being indexed. + "total_tokens": "str", # Optional. Total Tokens Consumed By + the Indexing Job. + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, + "name": "str", # Optional. Name of knowledge base. + "project_id": "str", # Optional. Knowledgebase Description. + "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether reranking is enabled + for retrieval. + "model": "str" # Optional. Reranker model internal name. + }, + "tags": [ + "str" # Optional. Tags to organize related resources. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. + "user_id": "str", # Optional. Id of user that created the knowledge + base. + "uuid": "str" # Optional. Unique id for knowledge base. + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. 
Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_genai_get_knowledge_base_request( + uuid=uuid, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", 
response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @overload + async def update_knowledge_base( + self, + uuid: str, + body: Optional[JSON] = None, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Update a Knowledge Base. + + To update a knowledge base, send a PUT request to ``/v2/gen-ai/knowledge_bases/{uuid}``. + + :param uuid: Knowledge base id. Required. + :type uuid: str + :param body: Default value is None. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "database_id": "str", # Optional. The id of the DigitalOcean database this + knowledge base will use, optional. + "name": "str", # Optional. Knowledge base name. + "project_id": "str", # Optional. The id of the DigitalOcean project this + knowledge base will belong to. + "reranking_config": { + "enabled": bool, # Optional. Whether reranking is enabled for + retrieval. + "model": "str" # Optional. Reranker model internal name. + }, + "tags": [ + "str" # Optional. Tags to organize your knowledge base. + ], + "uuid": "str" # Optional. Knowledge base id. 
+ } + + # response body for status code(s): 200 + response == { + "knowledge_base": { + "added_to_agent_at": "2020-02-20 00:00:00", # Optional. Time when + the knowledge base was added to the agent. + "created_at": "2020-02-20 00:00:00", # Optional. Creation date / + time. + "database_id": "str", # Optional. Knowledgebase Description. + "embedding_model_uuid": "str", # Optional. Knowledgebase + Description. + "is_public": bool, # Optional. Whether the knowledge base is public + or not. + "last_indexing_job": { + "completed_datasources": 0, # Optional. Number of + datasources indexed completed. + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "data_source_jobs": [ + { + "completed_at": "2020-02-20 00:00:00", # + Optional. Timestamp when data source completed indexing. + "data_source_uuid": "str", # Optional. Uuid + of the indexed data source. + "error_details": "str", # Optional. A + detailed error description. + "error_msg": "str", # Optional. A string + code provinding a hint which part of the system experienced an + error. + "failed_item_count": "str", # Optional. + Total count of files that have failed. + "indexed_file_count": "str", # Optional. + Total count of files that have been indexed. + "indexed_item_count": "str", # Optional. + Total count of files that have been indexed. + "removed_item_count": "str", # Optional. + Total count of files that have been removed. + "skipped_item_count": "str", # Optional. + Total count of files that have been skipped. + "started_at": "2020-02-20 00:00:00", # + Optional. Timestamp when data source started indexing. + "status": "DATA_SOURCE_STATUS_UNKNOWN", # + Optional. Default value is "DATA_SOURCE_STATUS_UNKNOWN". Known + values are: "DATA_SOURCE_STATUS_UNKNOWN", + "DATA_SOURCE_STATUS_IN_PROGRESS", "DATA_SOURCE_STATUS_UPDATED", + "DATA_SOURCE_STATUS_PARTIALLY_UPDATED", + "DATA_SOURCE_STATUS_NOT_UPDATED", "DATA_SOURCE_STATUS_FAILED", + and "DATA_SOURCE_STATUS_CANCELLED". 
+ "total_bytes": "str", # Optional. Total size + of files in data source in bytes. + "total_bytes_indexed": "str", # Optional. + Total size of files in data source in bytes that have been + indexed. + "total_file_count": "str" # Optional. Total + file count in the data source. + } + ], + "data_source_uuids": [ + "str" # Optional. IndexingJob description. + ], + "finished_at": "2020-02-20 00:00:00", # Optional. + IndexingJob description. + "is_report_available": bool, # Optional. Boolean value to + determine if the indexing job details are available. + "knowledge_base_uuid": "str", # Optional. Knowledge base id. + "phase": "BATCH_JOB_PHASE_UNKNOWN", # Optional. Default + value is "BATCH_JOB_PHASE_UNKNOWN". Known values are: + "BATCH_JOB_PHASE_UNKNOWN", "BATCH_JOB_PHASE_PENDING", + "BATCH_JOB_PHASE_RUNNING", "BATCH_JOB_PHASE_SUCCEEDED", + "BATCH_JOB_PHASE_FAILED", "BATCH_JOB_PHASE_ERROR", and + "BATCH_JOB_PHASE_CANCELLED". + "started_at": "2020-02-20 00:00:00", # Optional. IndexingJob + description. + "status": "INDEX_JOB_STATUS_UNKNOWN", # Optional. Default + value is "INDEX_JOB_STATUS_UNKNOWN". Known values are: + "INDEX_JOB_STATUS_UNKNOWN", "INDEX_JOB_STATUS_PARTIAL", + "INDEX_JOB_STATUS_IN_PROGRESS", "INDEX_JOB_STATUS_COMPLETED", + "INDEX_JOB_STATUS_FAILED", "INDEX_JOB_STATUS_NO_CHANGES", + "INDEX_JOB_STATUS_PENDING", and "INDEX_JOB_STATUS_CANCELLED". + "tokens": 0, # Optional. Number of tokens [This field is + deprecated]. + "total_datasources": 0, # Optional. Number of datasources + being indexed. + "total_tokens": "str", # Optional. Total Tokens Consumed By + the Indexing Job. + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, + "name": "str", # Optional. Name of knowledge base. + "project_id": "str", # Optional. Knowledgebase Description. + "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether reranking is enabled + for retrieval. 
+ "model": "str" # Optional. Reranker model internal name. + }, + "tags": [ + "str" # Optional. Tags to organize related resources. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. + "user_id": "str", # Optional. Id of user that created the knowledge + base. + "uuid": "str" # Optional. Unique id for knowledge base. + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + async def update_knowledge_base( + self, + uuid: str, + body: Optional[IO[bytes]] = None, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Update a Knowledge Base. + + To update a knowledge base, send a PUT request to ``/v2/gen-ai/knowledge_bases/{uuid}``. + + :param uuid: Knowledge base id. Required. + :type uuid: str + :param body: Default value is None. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "knowledge_base": { + "added_to_agent_at": "2020-02-20 00:00:00", # Optional. Time when + the knowledge base was added to the agent. + "created_at": "2020-02-20 00:00:00", # Optional. Creation date / + time. + "database_id": "str", # Optional. 
Knowledgebase Description. + "embedding_model_uuid": "str", # Optional. Knowledgebase + Description. + "is_public": bool, # Optional. Whether the knowledge base is public + or not. + "last_indexing_job": { + "completed_datasources": 0, # Optional. Number of + datasources indexed completed. + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "data_source_jobs": [ + { + "completed_at": "2020-02-20 00:00:00", # + Optional. Timestamp when data source completed indexing. + "data_source_uuid": "str", # Optional. Uuid + of the indexed data source. + "error_details": "str", # Optional. A + detailed error description. + "error_msg": "str", # Optional. A string + code provinding a hint which part of the system experienced an + error. + "failed_item_count": "str", # Optional. + Total count of files that have failed. + "indexed_file_count": "str", # Optional. + Total count of files that have been indexed. + "indexed_item_count": "str", # Optional. + Total count of files that have been indexed. + "removed_item_count": "str", # Optional. + Total count of files that have been removed. + "skipped_item_count": "str", # Optional. + Total count of files that have been skipped. + "started_at": "2020-02-20 00:00:00", # + Optional. Timestamp when data source started indexing. + "status": "DATA_SOURCE_STATUS_UNKNOWN", # + Optional. Default value is "DATA_SOURCE_STATUS_UNKNOWN". Known + values are: "DATA_SOURCE_STATUS_UNKNOWN", + "DATA_SOURCE_STATUS_IN_PROGRESS", "DATA_SOURCE_STATUS_UPDATED", + "DATA_SOURCE_STATUS_PARTIALLY_UPDATED", + "DATA_SOURCE_STATUS_NOT_UPDATED", "DATA_SOURCE_STATUS_FAILED", + and "DATA_SOURCE_STATUS_CANCELLED". + "total_bytes": "str", # Optional. Total size + of files in data source in bytes. + "total_bytes_indexed": "str", # Optional. + Total size of files in data source in bytes that have been + indexed. + "total_file_count": "str" # Optional. Total + file count in the data source. + } + ], + "data_source_uuids": [ + "str" # Optional. 
IndexingJob description. + ], + "finished_at": "2020-02-20 00:00:00", # Optional. + IndexingJob description. + "is_report_available": bool, # Optional. Boolean value to + determine if the indexing job details are available. + "knowledge_base_uuid": "str", # Optional. Knowledge base id. + "phase": "BATCH_JOB_PHASE_UNKNOWN", # Optional. Default + value is "BATCH_JOB_PHASE_UNKNOWN". Known values are: + "BATCH_JOB_PHASE_UNKNOWN", "BATCH_JOB_PHASE_PENDING", + "BATCH_JOB_PHASE_RUNNING", "BATCH_JOB_PHASE_SUCCEEDED", + "BATCH_JOB_PHASE_FAILED", "BATCH_JOB_PHASE_ERROR", and + "BATCH_JOB_PHASE_CANCELLED". + "started_at": "2020-02-20 00:00:00", # Optional. IndexingJob + description. + "status": "INDEX_JOB_STATUS_UNKNOWN", # Optional. Default + value is "INDEX_JOB_STATUS_UNKNOWN". Known values are: + "INDEX_JOB_STATUS_UNKNOWN", "INDEX_JOB_STATUS_PARTIAL", + "INDEX_JOB_STATUS_IN_PROGRESS", "INDEX_JOB_STATUS_COMPLETED", + "INDEX_JOB_STATUS_FAILED", "INDEX_JOB_STATUS_NO_CHANGES", + "INDEX_JOB_STATUS_PENDING", and "INDEX_JOB_STATUS_CANCELLED". + "tokens": 0, # Optional. Number of tokens [This field is + deprecated]. + "total_datasources": 0, # Optional. Number of datasources + being indexed. + "total_tokens": "str", # Optional. Total Tokens Consumed By + the Indexing Job. + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, + "name": "str", # Optional. Name of knowledge base. + "project_id": "str", # Optional. Knowledgebase Description. + "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether reranking is enabled + for retrieval. + "model": "str" # Optional. Reranker model internal name. + }, + "tags": [ + "str" # Optional. Tags to organize related resources. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. + "user_id": "str", # Optional. Id of user that created the knowledge + base. + "uuid": "str" # Optional. Unique id for knowledge base. 
+ } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace_async + async def update_knowledge_base( + self, uuid: str, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Update a Knowledge Base. + + To update a knowledge base, send a PUT request to ``/v2/gen-ai/knowledge_bases/{uuid}``. + + :param uuid: Knowledge base id. Required. + :type uuid: str + :param body: Is either a JSON type or a IO[bytes] type. Default value is None. + :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "database_id": "str", # Optional. The id of the DigitalOcean database this + knowledge base will use, optional. + "name": "str", # Optional. Knowledge base name. + "project_id": "str", # Optional. The id of the DigitalOcean project this + knowledge base will belong to. + "reranking_config": { + "enabled": bool, # Optional. Whether reranking is enabled for + retrieval. + "model": "str" # Optional. Reranker model internal name. + }, + "tags": [ + "str" # Optional. Tags to organize your knowledge base. + ], + "uuid": "str" # Optional. Knowledge base id. + } + + # response body for status code(s): 200 + response == { + "knowledge_base": { + "added_to_agent_at": "2020-02-20 00:00:00", # Optional. 
Time when + the knowledge base was added to the agent. + "created_at": "2020-02-20 00:00:00", # Optional. Creation date / + time. + "database_id": "str", # Optional. Knowledgebase Description. + "embedding_model_uuid": "str", # Optional. Knowledgebase + Description. + "is_public": bool, # Optional. Whether the knowledge base is public + or not. + "last_indexing_job": { + "completed_datasources": 0, # Optional. Number of + datasources indexed completed. + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "data_source_jobs": [ + { + "completed_at": "2020-02-20 00:00:00", # + Optional. Timestamp when data source completed indexing. + "data_source_uuid": "str", # Optional. Uuid + of the indexed data source. + "error_details": "str", # Optional. A + detailed error description. + "error_msg": "str", # Optional. A string + code provinding a hint which part of the system experienced an + error. + "failed_item_count": "str", # Optional. + Total count of files that have failed. + "indexed_file_count": "str", # Optional. + Total count of files that have been indexed. + "indexed_item_count": "str", # Optional. + Total count of files that have been indexed. + "removed_item_count": "str", # Optional. + Total count of files that have been removed. + "skipped_item_count": "str", # Optional. + Total count of files that have been skipped. + "started_at": "2020-02-20 00:00:00", # + Optional. Timestamp when data source started indexing. + "status": "DATA_SOURCE_STATUS_UNKNOWN", # + Optional. Default value is "DATA_SOURCE_STATUS_UNKNOWN". Known + values are: "DATA_SOURCE_STATUS_UNKNOWN", + "DATA_SOURCE_STATUS_IN_PROGRESS", "DATA_SOURCE_STATUS_UPDATED", + "DATA_SOURCE_STATUS_PARTIALLY_UPDATED", + "DATA_SOURCE_STATUS_NOT_UPDATED", "DATA_SOURCE_STATUS_FAILED", + and "DATA_SOURCE_STATUS_CANCELLED". + "total_bytes": "str", # Optional. Total size + of files in data source in bytes. + "total_bytes_indexed": "str", # Optional. 
+ Total size of files in data source in bytes that have been + indexed. + "total_file_count": "str" # Optional. Total + file count in the data source. + } + ], + "data_source_uuids": [ + "str" # Optional. IndexingJob description. + ], + "finished_at": "2020-02-20 00:00:00", # Optional. + IndexingJob description. + "is_report_available": bool, # Optional. Boolean value to + determine if the indexing job details are available. + "knowledge_base_uuid": "str", # Optional. Knowledge base id. + "phase": "BATCH_JOB_PHASE_UNKNOWN", # Optional. Default + value is "BATCH_JOB_PHASE_UNKNOWN". Known values are: + "BATCH_JOB_PHASE_UNKNOWN", "BATCH_JOB_PHASE_PENDING", + "BATCH_JOB_PHASE_RUNNING", "BATCH_JOB_PHASE_SUCCEEDED", + "BATCH_JOB_PHASE_FAILED", "BATCH_JOB_PHASE_ERROR", and + "BATCH_JOB_PHASE_CANCELLED". + "started_at": "2020-02-20 00:00:00", # Optional. IndexingJob + description. + "status": "INDEX_JOB_STATUS_UNKNOWN", # Optional. Default + value is "INDEX_JOB_STATUS_UNKNOWN". Known values are: + "INDEX_JOB_STATUS_UNKNOWN", "INDEX_JOB_STATUS_PARTIAL", + "INDEX_JOB_STATUS_IN_PROGRESS", "INDEX_JOB_STATUS_COMPLETED", + "INDEX_JOB_STATUS_FAILED", "INDEX_JOB_STATUS_NO_CHANGES", + "INDEX_JOB_STATUS_PENDING", and "INDEX_JOB_STATUS_CANCELLED". + "tokens": 0, # Optional. Number of tokens [This field is + deprecated]. + "total_datasources": 0, # Optional. Number of datasources + being indexed. + "total_tokens": "str", # Optional. Total Tokens Consumed By + the Indexing Job. + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, + "name": "str", # Optional. Name of knowledge base. + "project_id": "str", # Optional. Knowledgebase Description. + "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether reranking is enabled + for retrieval. + "model": "str" # Optional. Reranker model internal name. + }, + "tags": [ + "str" # Optional. Tags to organize related resources. 
+ ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. + "user_id": "str", # Optional. Id of user that created the knowledge + base. + "uuid": "str" # Optional. Unique id for knowledge base. + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[JSON] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + if body is not None: + _json = body + else: + _json = None + + _request = build_genai_update_knowledge_base_request( + uuid=uuid, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: 
disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace_async + async def delete_knowledge_base(self, uuid: str, **kwargs: Any) -> JSON: + # pylint: disable=line-too-long + """Delete a Knowledge Base. + + To delete a knowledge base, send a DELETE request to ``/v2/gen-ai/knowledge_bases/{uuid}``. + + :param uuid: Knowledge base id. Required. + :type uuid: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # response body for status code(s): 200 + response == { + "uuid": "str" # Optional. The id of the deleted knowledge base. + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) cls: ClsType[JSON] = kwargs.pop("cls", None) - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - if body is not None: - _json = body - else: - _json = None - - _request = build_genai_update_knowledge_base_data_source_request( - knowledge_base_uuid=knowledge_base_uuid, - data_source_uuid=data_source_uuid, - content_type=content_type, - json=_json, - content=_content, + _request = build_genai_delete_knowledge_base_request( + uuid=uuid, headers=_headers, params=_params, ) @@ -236164,20 +249070,141 @@ async def update_knowledge_base_data_source( return cast(JSON, deserialized) # type: ignore + 
@overload + async def create_model_eval_dataset_upload_presigned_urls( # pylint: disable=name-too-long + self, + body: Optional[JSON] = None, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Create Presigned URLs for Model Evaluation Dataset File Upload. + + To create presigned URLs for model evaluation dataset file upload, send a POST request to + ``/v2/genai/model_evaluation/datasets/file_upload_presigned_urls``. + + :param body: Default value is None. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "files": [ + { + "file_name": "str", # Optional. Local filename. + "file_size": "str" # Optional. The size of the file in + bytes. + } + ] + } + + # response body for status code(s): 200 + response == { + "request_id": "str", # Optional. The ID generated for the request for + Presigned URLs. + "uploads": [ + { + "expires_at": "2020-02-20 00:00:00", # Optional. The time + the url expires at. + "object_key": "str", # Optional. The unique object key to + store the file as. + "original_file_name": "str", # Optional. The original file + name. + "presigned_url": "str" # Optional. The actual presigned URL + the client can use to upload the file directly. + } + ] + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. 
+ "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + async def create_model_eval_dataset_upload_presigned_urls( # pylint: disable=name-too-long + self, + body: Optional[IO[bytes]] = None, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Create Presigned URLs for Model Evaluation Dataset File Upload. + + To create presigned URLs for model evaluation dataset file upload, send a POST request to + ``/v2/genai/model_evaluation/datasets/file_upload_presigned_urls``. + + :param body: Default value is None. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "request_id": "str", # Optional. The ID generated for the request for + Presigned URLs. + "uploads": [ + { + "expires_at": "2020-02-20 00:00:00", # Optional. The time + the url expires at. + "object_key": "str", # Optional. The unique object key to + store the file as. + "original_file_name": "str", # Optional. The original file + name. + "presigned_url": "str" # Optional. The actual presigned URL + the client can use to upload the file directly. + } + ] + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. 
Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + @distributed_trace_async - async def delete_knowledge_base_data_source( - self, knowledge_base_uuid: str, data_source_uuid: str, **kwargs: Any + async def create_model_eval_dataset_upload_presigned_urls( # pylint: disable=name-too-long + self, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Delete a Data Source from a Knowledge Base. + """Create Presigned URLs for Model Evaluation Dataset File Upload. - To delete a data source from a knowledge base, send a DELETE request to - ``/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}``. + To create presigned URLs for model evaluation dataset file upload, send a POST request to + ``/v2/genai/model_evaluation/datasets/file_upload_presigned_urls``. - :param knowledge_base_uuid: Knowledge base id. Required. - :type knowledge_base_uuid: str - :param data_source_uuid: Data source id. Required. - :type data_source_uuid: str + :param body: Is either a JSON type or a IO[bytes] type. Default value is None. + :type body: JSON or IO[bytes] :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -236185,10 +249212,33 @@ async def delete_knowledge_base_data_source( Example: .. code-block:: python + # JSON input template you can fill out and use as your body input. + body = { + "files": [ + { + "file_name": "str", # Optional. Local filename. + "file_size": "str" # Optional. The size of the file in + bytes. + } + ] + } + # response body for status code(s): 200 response == { - "data_source_uuid": "str", # Optional. Data source id. - "knowledge_base_uuid": "str" # Optional. Knowledge base id. + "request_id": "str", # Optional. The ID generated for the request for + Presigned URLs. + "uploads": [ + { + "expires_at": "2020-02-20 00:00:00", # Optional. 
The time + the url expires at. + "object_key": "str", # Optional. The unique object key to + store the file as. + "original_file_name": "str", # Optional. The original file + name. + "presigned_url": "str" # Optional. The actual presigned URL + the client can use to upload the file directly. + } + ] } # response body for status code(s): 404 response == { @@ -236215,14 +249265,29 @@ async def delete_knowledge_base_data_source( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_genai_delete_knowledge_base_data_source_request( - knowledge_base_uuid=knowledge_base_uuid, - data_source_uuid=data_source_uuid, + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + if body is not None: + _json = body + else: + _json = None + + _request = build_genai_create_model_eval_dataset_upload_presigned_urls_request( + content_type=content_type, + json=_json, + content=_content, headers=_headers, params=_params, ) @@ -236282,17 +249347,13 @@ async def delete_knowledge_base_data_source( return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def list_indexing_jobs_by_knowledge_base( - self, knowledge_base_uuid: str, **kwargs: Any - ) -> JSON: + async def list_model_evaluation_metrics(self, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """List Indexing Jobs for a Knowledge Base. + """List Model Evaluation Metrics. - To list latest 15 indexing jobs for a knowledge base, send a GET request to - ``/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/indexing_jobs``. 
+ To list all available metrics for model evaluation, send a GET request to + ``/v2/genai/model_evaluation_metrics``. - :param knowledge_base_uuid: Knowledge base uuid in string. Required. - :type knowledge_base_uuid: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -236302,97 +249363,47 @@ async def list_indexing_jobs_by_knowledge_base( # response body for status code(s): 200 response == { - "jobs": [ + "metrics": [ { - "completed_datasources": 0, # Optional. Number of - datasources indexed completed. - "created_at": "2020-02-20 00:00:00", # Optional. Creation - date / time. - "data_source_jobs": [ - { - "completed_at": "2020-02-20 00:00:00", # - Optional. Timestamp when data source completed indexing. - "data_source_uuid": "str", # Optional. Uuid - of the indexed data source. - "error_details": "str", # Optional. A - detailed error description. - "error_msg": "str", # Optional. A string - code provinding a hint which part of the system experienced an - error. - "failed_item_count": "str", # Optional. - Total count of files that have failed. - "indexed_file_count": "str", # Optional. - Total count of files that have been indexed. - "indexed_item_count": "str", # Optional. - Total count of files that have been indexed. - "removed_item_count": "str", # Optional. - Total count of files that have been removed. - "skipped_item_count": "str", # Optional. - Total count of files that have been skipped. - "started_at": "2020-02-20 00:00:00", # - Optional. Timestamp when data source started indexing. - "status": "DATA_SOURCE_STATUS_UNKNOWN", # - Optional. Default value is "DATA_SOURCE_STATUS_UNKNOWN". Known - values are: "DATA_SOURCE_STATUS_UNKNOWN", - "DATA_SOURCE_STATUS_IN_PROGRESS", "DATA_SOURCE_STATUS_UPDATED", - "DATA_SOURCE_STATUS_PARTIALLY_UPDATED", - "DATA_SOURCE_STATUS_NOT_UPDATED", "DATA_SOURCE_STATUS_FAILED", - and "DATA_SOURCE_STATUS_CANCELLED". - "total_bytes": "str", # Optional. 
Total size - of files in data source in bytes. - "total_bytes_indexed": "str", # Optional. - Total size of files in data source in bytes that have been - indexed. - "total_file_count": "str" # Optional. Total - file count in the data source. - } - ], - "data_source_uuids": [ - "str" # Optional. The indexing jobs. - ], - "finished_at": "2020-02-20 00:00:00", # Optional. The - indexing jobs. - "is_report_available": bool, # Optional. Boolean value to - determine if the indexing job details are available. - "knowledge_base_uuid": "str", # Optional. Knowledge base id. - "phase": "BATCH_JOB_PHASE_UNKNOWN", # Optional. Default - value is "BATCH_JOB_PHASE_UNKNOWN". Known values are: - "BATCH_JOB_PHASE_UNKNOWN", "BATCH_JOB_PHASE_PENDING", - "BATCH_JOB_PHASE_RUNNING", "BATCH_JOB_PHASE_SUCCEEDED", - "BATCH_JOB_PHASE_FAILED", "BATCH_JOB_PHASE_ERROR", and - "BATCH_JOB_PHASE_CANCELLED". - "started_at": "2020-02-20 00:00:00", # Optional. The - indexing jobs. - "status": "INDEX_JOB_STATUS_UNKNOWN", # Optional. Default - value is "INDEX_JOB_STATUS_UNKNOWN". Known values are: - "INDEX_JOB_STATUS_UNKNOWN", "INDEX_JOB_STATUS_PARTIAL", - "INDEX_JOB_STATUS_IN_PROGRESS", "INDEX_JOB_STATUS_COMPLETED", - "INDEX_JOB_STATUS_FAILED", "INDEX_JOB_STATUS_NO_CHANGES", - "INDEX_JOB_STATUS_PENDING", and "INDEX_JOB_STATUS_CANCELLED". - "tokens": 0, # Optional. Number of tokens [This field is - deprecated]. - "total_datasources": 0, # Optional. Number of datasources - being indexed. - "total_tokens": "str", # Optional. Total Tokens Consumed By - the Indexing Job. - "updated_at": "2020-02-20 00:00:00", # Optional. Last - modified. - "uuid": "str" # Optional. Unique id. - } - ], - "links": { - "pages": { - "first": "str", # Optional. First page. - "last": "str", # Optional. Last page. - "next": "str", # Optional. Next page. - "previous": "str" # Optional. Previous page. + "category": "METRIC_CATEGORY_UNSPECIFIED", # Optional. + Default value is "METRIC_CATEGORY_UNSPECIFIED". 
Known values are: + "METRIC_CATEGORY_UNSPECIFIED", "METRIC_CATEGORY_CORRECTNESS", + "METRIC_CATEGORY_USER_OUTCOMES", "METRIC_CATEGORY_SAFETY_AND_SECURITY", + "METRIC_CATEGORY_CONTEXT_QUALITY", and "METRIC_CATEGORY_MODEL_FIT". + "description": "str", # Optional. List of model evaluation + metrics. + "evaluation_scope": "EVALUATION_SCOPE_UNSPECIFIED", # + Optional. Default value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation or model + evaluation. For backwards compatibility, UNSPECIFIED defaults to agent + metrics only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". + "inverted": bool, # Optional. If true, the metric is + inverted, meaning that a lower value is better. + "is_metric_goal": bool, # Optional. List of model evaluation + metrics. + "metric_name": "str", # Optional. List of model evaluation + metrics. + "metric_rank": 0, # Optional. List of model evaluation + metrics. + "metric_type": "METRIC_TYPE_UNSPECIFIED", # Optional. + Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: + "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", and + "METRIC_TYPE_MODEL_SAFETY". + "metric_uuid": "str", # Optional. List of model evaluation + metrics. + "metric_value_type": "METRIC_VALUE_TYPE_UNSPECIFIED", # + Optional. Default value is "METRIC_VALUE_TYPE_UNSPECIFIED". Known values + are: "METRIC_VALUE_TYPE_UNSPECIFIED", "METRIC_VALUE_TYPE_NUMBER", + "METRIC_VALUE_TYPE_STRING", and "METRIC_VALUE_TYPE_PERCENTAGE". + "range_max": 0.0, # Optional. The maximum value for the + metric. + "range_min": 0.0 # Optional. The minimum value for the + metric. } - }, - "meta": { - "page": 0, # Optional. The current page. - "pages": 0, # Optional. Total number of pages. - "total": 0 # Optional. Total amount of items over all pages. 
- } + ] } # response body for status code(s): 404 response == { @@ -236424,8 +249435,7 @@ async def list_indexing_jobs_by_knowledge_base( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_genai_list_indexing_jobs_by_knowledge_base_request( - knowledge_base_uuid=knowledge_base_uuid, + _request = build_genai_list_model_evaluation_metrics_request( headers=_headers, params=_params, ) @@ -236485,15 +249495,33 @@ async def list_indexing_jobs_by_knowledge_base( return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def get_knowledge_base(self, uuid: str, **kwargs: Any) -> JSON: + async def list_model_evaluation_runs( + self, + *, + eval_preset_uuid: Optional[str] = None, + status: str = "MODEL_EVALUATION_RUN_STATUS_UNSPECIFIED", + page: Optional[int] = None, + per_page: Optional[int] = None, + **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """Retrieve Information About an Existing Knowledge Base. - - To retrive information about an existing knowledge base, send a GET request to - ``/v2/gen-ai/knowledge_bases/{uuid}``. - - :param uuid: Knowledge base id. Required. - :type uuid: str + """List Model Evaluation Runs. + + To list model evaluation runs, send a GET request to ``/v2/genai/model_evaluation_runs``. + + :keyword eval_preset_uuid: UUID of the evaluation preset to filter by. Default value is None. + :paramtype eval_preset_uuid: str + :keyword status: Filter by evaluation run status. Known values are: + "MODEL_EVALUATION_RUN_STATUS_UNSPECIFIED", "MODEL_EVALUATION_RUN_QUEUED", + "MODEL_EVALUATION_RUN_RUNNING_DATASET", "MODEL_EVALUATION_RUN_EVALUATING_RESULTS", + "MODEL_EVALUATION_RUN_CANCELLING", "MODEL_EVALUATION_RUN_CANCELLED", + "MODEL_EVALUATION_RUN_SUCCESSFUL", "MODEL_EVALUATION_RUN_PARTIALLY_SUCCESSFUL", and + "MODEL_EVALUATION_RUN_FAILED". Default value is "MODEL_EVALUATION_RUN_STATUS_UNSPECIFIED". + :paramtype status: str + :keyword page: Page number. Default value is None. 
+ :paramtype page: int + :keyword per_page: Items per page. Default value is None. + :paramtype per_page: int :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -236503,107 +249531,56 @@ async def get_knowledge_base(self, uuid: str, **kwargs: Any) -> JSON: # response body for status code(s): 200 response == { - "database_status": "CREATING", # Optional. Default value is "CREATING". - Known values are: "CREATING", "ONLINE", "POWEROFF", "REBUILDING", "REBALANCING", - "DECOMMISSIONED", "FORKING", "MIGRATING", "RESIZING", "RESTORING", "POWERING_ON", - and "UNHEALTHY". - "knowledge_base": { - "added_to_agent_at": "2020-02-20 00:00:00", # Optional. Time when - the knowledge base was added to the agent. - "created_at": "2020-02-20 00:00:00", # Optional. Creation date / - time. - "database_id": "str", # Optional. Knowledgebase Description. - "embedding_model_uuid": "str", # Optional. Knowledgebase - Description. - "is_public": bool, # Optional. Whether the knowledge base is public - or not. - "last_indexing_job": { - "completed_datasources": 0, # Optional. Number of - datasources indexed completed. - "created_at": "2020-02-20 00:00:00", # Optional. Creation - date / time. - "data_source_jobs": [ - { - "completed_at": "2020-02-20 00:00:00", # - Optional. Timestamp when data source completed indexing. - "data_source_uuid": "str", # Optional. Uuid - of the indexed data source. - "error_details": "str", # Optional. A - detailed error description. - "error_msg": "str", # Optional. A string - code provinding a hint which part of the system experienced an - error. - "failed_item_count": "str", # Optional. - Total count of files that have failed. - "indexed_file_count": "str", # Optional. - Total count of files that have been indexed. - "indexed_item_count": "str", # Optional. - Total count of files that have been indexed. - "removed_item_count": "str", # Optional. - Total count of files that have been removed. 
- "skipped_item_count": "str", # Optional. - Total count of files that have been skipped. - "started_at": "2020-02-20 00:00:00", # - Optional. Timestamp when data source started indexing. - "status": "DATA_SOURCE_STATUS_UNKNOWN", # - Optional. Default value is "DATA_SOURCE_STATUS_UNKNOWN". Known - values are: "DATA_SOURCE_STATUS_UNKNOWN", - "DATA_SOURCE_STATUS_IN_PROGRESS", "DATA_SOURCE_STATUS_UPDATED", - "DATA_SOURCE_STATUS_PARTIALLY_UPDATED", - "DATA_SOURCE_STATUS_NOT_UPDATED", "DATA_SOURCE_STATUS_FAILED", - and "DATA_SOURCE_STATUS_CANCELLED". - "total_bytes": "str", # Optional. Total size - of files in data source in bytes. - "total_bytes_indexed": "str", # Optional. - Total size of files in data source in bytes that have been - indexed. - "total_file_count": "str" # Optional. Total - file count in the data source. - } - ], - "data_source_uuids": [ - "str" # Optional. IndexingJob description. - ], - "finished_at": "2020-02-20 00:00:00", # Optional. - IndexingJob description. - "is_report_available": bool, # Optional. Boolean value to - determine if the indexing job details are available. - "knowledge_base_uuid": "str", # Optional. Knowledge base id. - "phase": "BATCH_JOB_PHASE_UNKNOWN", # Optional. Default - value is "BATCH_JOB_PHASE_UNKNOWN". Known values are: - "BATCH_JOB_PHASE_UNKNOWN", "BATCH_JOB_PHASE_PENDING", - "BATCH_JOB_PHASE_RUNNING", "BATCH_JOB_PHASE_SUCCEEDED", - "BATCH_JOB_PHASE_FAILED", "BATCH_JOB_PHASE_ERROR", and - "BATCH_JOB_PHASE_CANCELLED". - "started_at": "2020-02-20 00:00:00", # Optional. IndexingJob - description. - "status": "INDEX_JOB_STATUS_UNKNOWN", # Optional. Default - value is "INDEX_JOB_STATUS_UNKNOWN". Known values are: - "INDEX_JOB_STATUS_UNKNOWN", "INDEX_JOB_STATUS_PARTIAL", - "INDEX_JOB_STATUS_IN_PROGRESS", "INDEX_JOB_STATUS_COMPLETED", - "INDEX_JOB_STATUS_FAILED", "INDEX_JOB_STATUS_NO_CHANGES", - "INDEX_JOB_STATUS_PENDING", and "INDEX_JOB_STATUS_CANCELLED". - "tokens": 0, # Optional. 
Number of tokens [This field is - deprecated]. - "total_datasources": 0, # Optional. Number of datasources - being indexed. - "total_tokens": "str", # Optional. Total Tokens Consumed By - the Indexing Job. - "updated_at": "2020-02-20 00:00:00", # Optional. Last - modified. - "uuid": "str" # Optional. Unique id. - }, - "name": "str", # Optional. Name of knowledge base. - "project_id": "str", # Optional. Knowledgebase Description. - "region": "str", # Optional. Region code. - "tags": [ - "str" # Optional. Tags to organize related resources. - ], - "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. - "user_id": "str", # Optional. Id of user that created the knowledge - base. - "uuid": "str" # Optional. Unique id for knowledge base. - } + "links": { + "pages": { + "first": "str", # Optional. First page. + "last": "str", # Optional. Last page. + "next": "str", # Optional. Next page. + "previous": "str" # Optional. Previous page. + } + }, + "meta": { + "page": 0, # Optional. The current page. + "pages": 0, # Optional. Total number of pages. + "total": 0 # Optional. Total amount of items over all pages. + }, + "runs": [ + { + "candidate_model_name": "str", # Optional. Name of the + candidate model being evaluated. + "candidate_model_source": + "CANDIDATE_MODEL_SOURCE_SERVERLESS", # Optional. Default value is + "CANDIDATE_MODEL_SOURCE_SERVERLESS". Whether inference runs against the + serverless platform, a dedicated deployment, or a model router. Known + values are: "CANDIDATE_MODEL_SOURCE_SERVERLESS", + "CANDIDATE_MODEL_SOURCE_DEDICATED", and "CANDIDATE_MODEL_SOURCE_ROUTER". + "candidate_model_uuid": "str", # Optional. UUID of the + candidate model being evaluated. + "created_at": "2020-02-20 00:00:00", # Optional. Timestamp + when the run was created. + "dataset_name": "str", # Optional. Name of the dataset used + for evaluation. + "dataset_uuid": "str", # Optional. UUID of the dataset used + for evaluation. + "eval_run_uuid": "str", # Optional. 
UUID of the evaluation + run. + "judge_model_name": "str", # Optional. Summary view of + evaluation runs for the run history list. + "judge_model_uuid": "str", # Optional. Judge model used to + score responses. + "name": "str", # Optional. Name of the evaluation run. + "status": "MODEL_EVALUATION_RUN_STATUS_UNSPECIFIED" # + Optional. Default value is "MODEL_EVALUATION_RUN_STATUS_UNSPECIFIED". + Model Evaluation Run Statuses. Known values are: + "MODEL_EVALUATION_RUN_STATUS_UNSPECIFIED", "MODEL_EVALUATION_RUN_QUEUED", + "MODEL_EVALUATION_RUN_RUNNING_DATASET", + "MODEL_EVALUATION_RUN_EVALUATING_RESULTS", + "MODEL_EVALUATION_RUN_CANCELLING", "MODEL_EVALUATION_RUN_CANCELLED", + "MODEL_EVALUATION_RUN_SUCCESSFUL", + "MODEL_EVALUATION_RUN_PARTIALLY_SUCCESSFUL", and + "MODEL_EVALUATION_RUN_FAILED". + } + ] } # response body for status code(s): 404 response == { @@ -236635,8 +249612,11 @@ async def get_knowledge_base(self, uuid: str, **kwargs: Any) -> JSON: cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_genai_get_knowledge_base_request( - uuid=uuid, + _request = build_genai_list_model_evaluation_runs_request( + eval_preset_uuid=eval_preset_uuid, + status=status, + page=page, + per_page=per_page, headers=_headers, params=_params, ) @@ -236696,21 +249676,18 @@ async def get_knowledge_base(self, uuid: str, **kwargs: Any) -> JSON: return cast(JSON, deserialized) # type: ignore @overload - async def update_knowledge_base( + async def create_model_evaluation_run( self, - uuid: str, body: Optional[JSON] = None, *, content_type: str = "application/json", **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Update a Knowledge Base. + """Create Model Evaluation Run. - To update a knowledge base, send a PUT request to ``/v2/gen-ai/knowledge_bases/{uuid}``. + To create a model evaluation run, send a POST request to ``/v2/genai/model_evaluation_runs``. - :param uuid: Knowledge base id. Required. - :type uuid: str :param body: Default value is None. 
:type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. @@ -236725,118 +249702,55 @@ async def update_knowledge_base( # JSON input template you can fill out and use as your body input. body = { - "database_id": "str", # Optional. The id of the DigitalOcean database this - knowledge base will use, optiona. - "embedding_model_uuid": "str", # Optional. Identifier for the foundation - model. - "name": "str", # Optional. Knowledge base name. - "project_id": "str", # Optional. The id of the DigitalOcean project this - knowledge base will belong to. - "tags": [ - "str" # Optional. Tags to organize your knowledge base. + "candidate_inference_config": { + "max_tokens": 0, # Optional. Inference configuration for the + candidate model during evaluation. + "stop_token": "str", # Optional. Inference configuration for the + candidate model during evaluation. + "system_prompt": "str", # Optional. Inference configuration for the + candidate model during evaluation. + "temperature": 0.0 # Optional. Inference configuration for the + candidate model during evaluation. + }, + "candidate_model_name": "str", # Optional. Model slug used to call the + candidate model API. For dedicated inference, this is the model slug from the + deployment. For serverless, this should match the model's internal name. + "candidate_model_source": "CANDIDATE_MODEL_SOURCE_SERVERLESS", # Optional. + Default value is "CANDIDATE_MODEL_SOURCE_SERVERLESS". Whether inference runs + against the serverless platform, a dedicated deployment, or a model router. Known + values are: "CANDIDATE_MODEL_SOURCE_SERVERLESS", + "CANDIDATE_MODEL_SOURCE_DEDICATED", and "CANDIDATE_MODEL_SOURCE_ROUTER". + "candidate_model_uuid": "str", # Optional. UUID of the candidate model to + evaluate. + "dataset_uuid": "str", # Optional. UUID of the dataset to use for + evaluation. + "eval_preset_uuid": "str", # Optional. + "judge_model_uuid": "str", # Optional. 
UUID of the judge model used to score + responses. + "metric_uuids": [ + "str" # Optional. UUIDs of metrics to evaluate (selected from + ListModelEvaluationMetrics). ], - "uuid": "str" # Optional. Knowledge base id. + "name": "str", # Optional. + "preset_name": "str", # Optional. + "save_as_preset": bool, # Optional. .. role:: raw-html-m2r(raw) :format: + html If true, saves the inline config as a reusable preset"" + :raw-html-m2r:`
` Ignored when eval_preset_uuid is provided. + "source": "str", # Optional. Source of the run creation (api, sdk, cli). + "star_metric": { + "metric_uuid": "str", # Optional. + "name": "str", # Optional. + "success_threshold": 0.0, # Optional. The success threshold for the + star metric. This is a value that the metric must reach to be considered + successful. + "success_threshold_pct": 0 # Optional. The success threshold for the + star metric. This is a percentage value between 0 and 100. + } } # response body for status code(s): 200 response == { - "knowledge_base": { - "added_to_agent_at": "2020-02-20 00:00:00", # Optional. Time when - the knowledge base was added to the agent. - "created_at": "2020-02-20 00:00:00", # Optional. Creation date / - time. - "database_id": "str", # Optional. Knowledgebase Description. - "embedding_model_uuid": "str", # Optional. Knowledgebase - Description. - "is_public": bool, # Optional. Whether the knowledge base is public - or not. - "last_indexing_job": { - "completed_datasources": 0, # Optional. Number of - datasources indexed completed. - "created_at": "2020-02-20 00:00:00", # Optional. Creation - date / time. - "data_source_jobs": [ - { - "completed_at": "2020-02-20 00:00:00", # - Optional. Timestamp when data source completed indexing. - "data_source_uuid": "str", # Optional. Uuid - of the indexed data source. - "error_details": "str", # Optional. A - detailed error description. - "error_msg": "str", # Optional. A string - code provinding a hint which part of the system experienced an - error. - "failed_item_count": "str", # Optional. - Total count of files that have failed. - "indexed_file_count": "str", # Optional. - Total count of files that have been indexed. - "indexed_item_count": "str", # Optional. - Total count of files that have been indexed. - "removed_item_count": "str", # Optional. - Total count of files that have been removed. - "skipped_item_count": "str", # Optional. 
- Total count of files that have been skipped. - "started_at": "2020-02-20 00:00:00", # - Optional. Timestamp when data source started indexing. - "status": "DATA_SOURCE_STATUS_UNKNOWN", # - Optional. Default value is "DATA_SOURCE_STATUS_UNKNOWN". Known - values are: "DATA_SOURCE_STATUS_UNKNOWN", - "DATA_SOURCE_STATUS_IN_PROGRESS", "DATA_SOURCE_STATUS_UPDATED", - "DATA_SOURCE_STATUS_PARTIALLY_UPDATED", - "DATA_SOURCE_STATUS_NOT_UPDATED", "DATA_SOURCE_STATUS_FAILED", - and "DATA_SOURCE_STATUS_CANCELLED". - "total_bytes": "str", # Optional. Total size - of files in data source in bytes. - "total_bytes_indexed": "str", # Optional. - Total size of files in data source in bytes that have been - indexed. - "total_file_count": "str" # Optional. Total - file count in the data source. - } - ], - "data_source_uuids": [ - "str" # Optional. IndexingJob description. - ], - "finished_at": "2020-02-20 00:00:00", # Optional. - IndexingJob description. - "is_report_available": bool, # Optional. Boolean value to - determine if the indexing job details are available. - "knowledge_base_uuid": "str", # Optional. Knowledge base id. - "phase": "BATCH_JOB_PHASE_UNKNOWN", # Optional. Default - value is "BATCH_JOB_PHASE_UNKNOWN". Known values are: - "BATCH_JOB_PHASE_UNKNOWN", "BATCH_JOB_PHASE_PENDING", - "BATCH_JOB_PHASE_RUNNING", "BATCH_JOB_PHASE_SUCCEEDED", - "BATCH_JOB_PHASE_FAILED", "BATCH_JOB_PHASE_ERROR", and - "BATCH_JOB_PHASE_CANCELLED". - "started_at": "2020-02-20 00:00:00", # Optional. IndexingJob - description. - "status": "INDEX_JOB_STATUS_UNKNOWN", # Optional. Default - value is "INDEX_JOB_STATUS_UNKNOWN". Known values are: - "INDEX_JOB_STATUS_UNKNOWN", "INDEX_JOB_STATUS_PARTIAL", - "INDEX_JOB_STATUS_IN_PROGRESS", "INDEX_JOB_STATUS_COMPLETED", - "INDEX_JOB_STATUS_FAILED", "INDEX_JOB_STATUS_NO_CHANGES", - "INDEX_JOB_STATUS_PENDING", and "INDEX_JOB_STATUS_CANCELLED". - "tokens": 0, # Optional. Number of tokens [This field is - deprecated]. - "total_datasources": 0, # Optional. 
Number of datasources - being indexed. - "total_tokens": "str", # Optional. Total Tokens Consumed By - the Indexing Job. - "updated_at": "2020-02-20 00:00:00", # Optional. Last - modified. - "uuid": "str" # Optional. Unique id. - }, - "name": "str", # Optional. Name of knowledge base. - "project_id": "str", # Optional. Knowledgebase Description. - "region": "str", # Optional. Region code. - "tags": [ - "str" # Optional. Tags to organize related resources. - ], - "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. - "user_id": "str", # Optional. Id of user that created the knowledge - base. - "uuid": "str" # Optional. Unique id for knowledge base. - } + "eval_run_uuid": "str" # Optional. UUID of the created evaluation run. } # response body for status code(s): 404 response == { @@ -236852,21 +249766,18 @@ async def update_knowledge_base( """ @overload - async def update_knowledge_base( + async def create_model_evaluation_run( self, - uuid: str, body: Optional[IO[bytes]] = None, *, content_type: str = "application/json", **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Update a Knowledge Base. + """Create Model Evaluation Run. - To update a knowledge base, send a PUT request to ``/v2/gen-ai/knowledge_bases/{uuid}``. + To create a model evaluation run, send a POST request to ``/v2/genai/model_evaluation_runs``. - :param uuid: Knowledge base id. Required. - :type uuid: str :param body: Default value is None. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. @@ -236881,103 +249792,7 @@ async def update_knowledge_base( # response body for status code(s): 200 response == { - "knowledge_base": { - "added_to_agent_at": "2020-02-20 00:00:00", # Optional. Time when - the knowledge base was added to the agent. - "created_at": "2020-02-20 00:00:00", # Optional. Creation date / - time. - "database_id": "str", # Optional. Knowledgebase Description. - "embedding_model_uuid": "str", # Optional. 
Knowledgebase - Description. - "is_public": bool, # Optional. Whether the knowledge base is public - or not. - "last_indexing_job": { - "completed_datasources": 0, # Optional. Number of - datasources indexed completed. - "created_at": "2020-02-20 00:00:00", # Optional. Creation - date / time. - "data_source_jobs": [ - { - "completed_at": "2020-02-20 00:00:00", # - Optional. Timestamp when data source completed indexing. - "data_source_uuid": "str", # Optional. Uuid - of the indexed data source. - "error_details": "str", # Optional. A - detailed error description. - "error_msg": "str", # Optional. A string - code provinding a hint which part of the system experienced an - error. - "failed_item_count": "str", # Optional. - Total count of files that have failed. - "indexed_file_count": "str", # Optional. - Total count of files that have been indexed. - "indexed_item_count": "str", # Optional. - Total count of files that have been indexed. - "removed_item_count": "str", # Optional. - Total count of files that have been removed. - "skipped_item_count": "str", # Optional. - Total count of files that have been skipped. - "started_at": "2020-02-20 00:00:00", # - Optional. Timestamp when data source started indexing. - "status": "DATA_SOURCE_STATUS_UNKNOWN", # - Optional. Default value is "DATA_SOURCE_STATUS_UNKNOWN". Known - values are: "DATA_SOURCE_STATUS_UNKNOWN", - "DATA_SOURCE_STATUS_IN_PROGRESS", "DATA_SOURCE_STATUS_UPDATED", - "DATA_SOURCE_STATUS_PARTIALLY_UPDATED", - "DATA_SOURCE_STATUS_NOT_UPDATED", "DATA_SOURCE_STATUS_FAILED", - and "DATA_SOURCE_STATUS_CANCELLED". - "total_bytes": "str", # Optional. Total size - of files in data source in bytes. - "total_bytes_indexed": "str", # Optional. - Total size of files in data source in bytes that have been - indexed. - "total_file_count": "str" # Optional. Total - file count in the data source. - } - ], - "data_source_uuids": [ - "str" # Optional. IndexingJob description. 
- ], - "finished_at": "2020-02-20 00:00:00", # Optional. - IndexingJob description. - "is_report_available": bool, # Optional. Boolean value to - determine if the indexing job details are available. - "knowledge_base_uuid": "str", # Optional. Knowledge base id. - "phase": "BATCH_JOB_PHASE_UNKNOWN", # Optional. Default - value is "BATCH_JOB_PHASE_UNKNOWN". Known values are: - "BATCH_JOB_PHASE_UNKNOWN", "BATCH_JOB_PHASE_PENDING", - "BATCH_JOB_PHASE_RUNNING", "BATCH_JOB_PHASE_SUCCEEDED", - "BATCH_JOB_PHASE_FAILED", "BATCH_JOB_PHASE_ERROR", and - "BATCH_JOB_PHASE_CANCELLED". - "started_at": "2020-02-20 00:00:00", # Optional. IndexingJob - description. - "status": "INDEX_JOB_STATUS_UNKNOWN", # Optional. Default - value is "INDEX_JOB_STATUS_UNKNOWN". Known values are: - "INDEX_JOB_STATUS_UNKNOWN", "INDEX_JOB_STATUS_PARTIAL", - "INDEX_JOB_STATUS_IN_PROGRESS", "INDEX_JOB_STATUS_COMPLETED", - "INDEX_JOB_STATUS_FAILED", "INDEX_JOB_STATUS_NO_CHANGES", - "INDEX_JOB_STATUS_PENDING", and "INDEX_JOB_STATUS_CANCELLED". - "tokens": 0, # Optional. Number of tokens [This field is - deprecated]. - "total_datasources": 0, # Optional. Number of datasources - being indexed. - "total_tokens": "str", # Optional. Total Tokens Consumed By - the Indexing Job. - "updated_at": "2020-02-20 00:00:00", # Optional. Last - modified. - "uuid": "str" # Optional. Unique id. - }, - "name": "str", # Optional. Name of knowledge base. - "project_id": "str", # Optional. Knowledgebase Description. - "region": "str", # Optional. Region code. - "tags": [ - "str" # Optional. Tags to organize related resources. - ], - "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. - "user_id": "str", # Optional. Id of user that created the knowledge - base. - "uuid": "str" # Optional. Unique id for knowledge base. - } + "eval_run_uuid": "str" # Optional. UUID of the created evaluation run. 
} # response body for status code(s): 404 response == { @@ -236993,16 +249808,14 @@ async def update_knowledge_base( """ @distributed_trace_async - async def update_knowledge_base( - self, uuid: str, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any + async def create_model_evaluation_run( + self, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Update a Knowledge Base. + """Create Model Evaluation Run. - To update a knowledge base, send a PUT request to ``/v2/gen-ai/knowledge_bases/{uuid}``. + To create a model evaluation run, send a POST request to ``/v2/genai/model_evaluation_runs``. - :param uuid: Knowledge base id. Required. - :type uuid: str :param body: Is either a JSON type or a IO[bytes] type. Default value is None. :type body: JSON or IO[bytes] :return: JSON object @@ -237014,118 +249827,55 @@ async def update_knowledge_base( # JSON input template you can fill out and use as your body input. body = { - "database_id": "str", # Optional. The id of the DigitalOcean database this - knowledge base will use, optiona. - "embedding_model_uuid": "str", # Optional. Identifier for the foundation - model. - "name": "str", # Optional. Knowledge base name. - "project_id": "str", # Optional. The id of the DigitalOcean project this - knowledge base will belong to. - "tags": [ - "str" # Optional. Tags to organize your knowledge base. + "candidate_inference_config": { + "max_tokens": 0, # Optional. Inference configuration for the + candidate model during evaluation. + "stop_token": "str", # Optional. Inference configuration for the + candidate model during evaluation. + "system_prompt": "str", # Optional. Inference configuration for the + candidate model during evaluation. + "temperature": 0.0 # Optional. Inference configuration for the + candidate model during evaluation. + }, + "candidate_model_name": "str", # Optional. Model slug used to call the + candidate model API. 
For dedicated inference, this is the model slug from the + deployment. For serverless, this should match the model's internal name. + "candidate_model_source": "CANDIDATE_MODEL_SOURCE_SERVERLESS", # Optional. + Default value is "CANDIDATE_MODEL_SOURCE_SERVERLESS". Whether inference runs + against the serverless platform, a dedicated deployment, or a model router. Known + values are: "CANDIDATE_MODEL_SOURCE_SERVERLESS", + "CANDIDATE_MODEL_SOURCE_DEDICATED", and "CANDIDATE_MODEL_SOURCE_ROUTER". + "candidate_model_uuid": "str", # Optional. UUID of the candidate model to + evaluate. + "dataset_uuid": "str", # Optional. UUID of the dataset to use for + evaluation. + "eval_preset_uuid": "str", # Optional. + "judge_model_uuid": "str", # Optional. UUID of the judge model used to score + responses. + "metric_uuids": [ + "str" # Optional. UUIDs of metrics to evaluate (selected from + ListModelEvaluationMetrics). ], - "uuid": "str" # Optional. Knowledge base id. + "name": "str", # Optional. + "preset_name": "str", # Optional. + "save_as_preset": bool, # Optional. .. role:: raw-html-m2r(raw) :format: + html If true, saves the inline config as a reusable preset"" + :raw-html-m2r:`
` Ignored when eval_preset_uuid is provided. + "source": "str", # Optional. Source of the run creation (api, sdk, cli). + "star_metric": { + "metric_uuid": "str", # Optional. + "name": "str", # Optional. + "success_threshold": 0.0, # Optional. The success threshold for the + star metric. This is a value that the metric must reach to be considered + successful. + "success_threshold_pct": 0 # Optional. The success threshold for the + star metric. This is a percentage value between 0 and 100. + } } # response body for status code(s): 200 response == { - "knowledge_base": { - "added_to_agent_at": "2020-02-20 00:00:00", # Optional. Time when - the knowledge base was added to the agent. - "created_at": "2020-02-20 00:00:00", # Optional. Creation date / - time. - "database_id": "str", # Optional. Knowledgebase Description. - "embedding_model_uuid": "str", # Optional. Knowledgebase - Description. - "is_public": bool, # Optional. Whether the knowledge base is public - or not. - "last_indexing_job": { - "completed_datasources": 0, # Optional. Number of - datasources indexed completed. - "created_at": "2020-02-20 00:00:00", # Optional. Creation - date / time. - "data_source_jobs": [ - { - "completed_at": "2020-02-20 00:00:00", # - Optional. Timestamp when data source completed indexing. - "data_source_uuid": "str", # Optional. Uuid - of the indexed data source. - "error_details": "str", # Optional. A - detailed error description. - "error_msg": "str", # Optional. A string - code provinding a hint which part of the system experienced an - error. - "failed_item_count": "str", # Optional. - Total count of files that have failed. - "indexed_file_count": "str", # Optional. - Total count of files that have been indexed. - "indexed_item_count": "str", # Optional. - Total count of files that have been indexed. - "removed_item_count": "str", # Optional. - Total count of files that have been removed. - "skipped_item_count": "str", # Optional. 
- Total count of files that have been skipped. - "started_at": "2020-02-20 00:00:00", # - Optional. Timestamp when data source started indexing. - "status": "DATA_SOURCE_STATUS_UNKNOWN", # - Optional. Default value is "DATA_SOURCE_STATUS_UNKNOWN". Known - values are: "DATA_SOURCE_STATUS_UNKNOWN", - "DATA_SOURCE_STATUS_IN_PROGRESS", "DATA_SOURCE_STATUS_UPDATED", - "DATA_SOURCE_STATUS_PARTIALLY_UPDATED", - "DATA_SOURCE_STATUS_NOT_UPDATED", "DATA_SOURCE_STATUS_FAILED", - and "DATA_SOURCE_STATUS_CANCELLED". - "total_bytes": "str", # Optional. Total size - of files in data source in bytes. - "total_bytes_indexed": "str", # Optional. - Total size of files in data source in bytes that have been - indexed. - "total_file_count": "str" # Optional. Total - file count in the data source. - } - ], - "data_source_uuids": [ - "str" # Optional. IndexingJob description. - ], - "finished_at": "2020-02-20 00:00:00", # Optional. - IndexingJob description. - "is_report_available": bool, # Optional. Boolean value to - determine if the indexing job details are available. - "knowledge_base_uuid": "str", # Optional. Knowledge base id. - "phase": "BATCH_JOB_PHASE_UNKNOWN", # Optional. Default - value is "BATCH_JOB_PHASE_UNKNOWN". Known values are: - "BATCH_JOB_PHASE_UNKNOWN", "BATCH_JOB_PHASE_PENDING", - "BATCH_JOB_PHASE_RUNNING", "BATCH_JOB_PHASE_SUCCEEDED", - "BATCH_JOB_PHASE_FAILED", "BATCH_JOB_PHASE_ERROR", and - "BATCH_JOB_PHASE_CANCELLED". - "started_at": "2020-02-20 00:00:00", # Optional. IndexingJob - description. - "status": "INDEX_JOB_STATUS_UNKNOWN", # Optional. Default - value is "INDEX_JOB_STATUS_UNKNOWN". Known values are: - "INDEX_JOB_STATUS_UNKNOWN", "INDEX_JOB_STATUS_PARTIAL", - "INDEX_JOB_STATUS_IN_PROGRESS", "INDEX_JOB_STATUS_COMPLETED", - "INDEX_JOB_STATUS_FAILED", "INDEX_JOB_STATUS_NO_CHANGES", - "INDEX_JOB_STATUS_PENDING", and "INDEX_JOB_STATUS_CANCELLED". - "tokens": 0, # Optional. Number of tokens [This field is - deprecated]. - "total_datasources": 0, # Optional. 
Number of datasources - being indexed. - "total_tokens": "str", # Optional. Total Tokens Consumed By - the Indexing Job. - "updated_at": "2020-02-20 00:00:00", # Optional. Last - modified. - "uuid": "str" # Optional. Unique id. - }, - "name": "str", # Optional. Name of knowledge base. - "project_id": "str", # Optional. Knowledgebase Description. - "region": "str", # Optional. Region code. - "tags": [ - "str" # Optional. Tags to organize related resources. - ], - "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. - "user_id": "str", # Optional. Id of user that created the knowledge - base. - "uuid": "str" # Optional. Unique id for knowledge base. - } + "eval_run_uuid": "str" # Optional. UUID of the created evaluation run. } # response body for status code(s): 404 response == { @@ -237171,8 +249921,7 @@ async def update_knowledge_base( else: _json = None - _request = build_genai_update_knowledge_base_request( - uuid=uuid, + _request = build_genai_create_model_evaluation_run_request( content_type=content_type, json=_json, content=_content, @@ -237235,14 +249984,27 @@ async def update_knowledge_base( return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def delete_knowledge_base(self, uuid: str, **kwargs: Any) -> JSON: + async def get_model_evaluation_run( + self, + eval_run_uuid: str, + *, + page: Optional[int] = None, + per_page: Optional[int] = None, + **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """Delete a Knowledge Base. + """Retrieve Model Evaluation Run. - To delete a knowledge base, send a DELETE request to ``/v2/gen-ai/knowledge_bases/{uuid}``. + To retrieve a model evaluation run, send a GET request to + ``/v2/genai/model_evaluation_runs/{eval_run_uuid}``. - :param uuid: Knowledge base id. Required. - :type uuid: str + :param eval_run_uuid: UUID of the evaluation run. Required. + :type eval_run_uuid: str + :keyword page: Page number for per-prompt results (defaults to 1). Default value is None. 
+ :paramtype page: int + :keyword per_page: Number of per-prompt results per page (defaults to 50). Default value is + None. + :paramtype per_page: int :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -237252,7 +250014,323 @@ async def delete_knowledge_base(self, uuid: str, **kwargs: Any) -> JSON: # response body for status code(s): 200 response == { - "uuid": "str" # Optional. The id of the deleted knowledge base. + "links": { + "pages": { + "first": "str", # Optional. First page. + "last": "str", # Optional. Last page. + "next": "str", # Optional. Next page. + "previous": "str" # Optional. Previous page. + } + }, + "meta": { + "page": 0, # Optional. The current page. + "pages": 0, # Optional. Total number of pages. + "total": 0 # Optional. Total amount of items over all pages. + }, + "results": [ + { + "candidate_model_name": "str", # Optional. Paginated + per-prompt evaluation results. + "candidate_model_uuid": "str", # Optional. Paginated + per-prompt evaluation results. + "ground_truth": "str", # Optional. Paginated per-prompt + evaluation results. + "input": "str", # Optional. The input query sent to the + candidate model. + "metric_results": [ + { + "error_description": "str", # Optional. + Error description if the metric could not be calculated. + "metric_name": "str", # Optional. Metric + name. + "metric_value_type": + "METRIC_VALUE_TYPE_UNSPECIFIED", # Optional. Default value is + "METRIC_VALUE_TYPE_UNSPECIFIED". Known values are: + "METRIC_VALUE_TYPE_UNSPECIFIED", "METRIC_VALUE_TYPE_NUMBER", + "METRIC_VALUE_TYPE_STRING", and "METRIC_VALUE_TYPE_PERCENTAGE". + "number_value": 0.0, # Optional. The value + of the metric as a number. + "reasoning": "str", # Optional. Reasoning of + the metric result. + "string_value": "str" # Optional. The value + of the metric as a string. + } + ], + "output": "str" # Optional. The response from the candidate + model. 
+ } + ], + "run": { + "candidate_inference_config": { + "max_tokens": 0, # Optional. Inference configuration for the + candidate model during evaluation. + "stop_token": "str", # Optional. Inference configuration for + the candidate model during evaluation. + "system_prompt": "str", # Optional. Inference configuration + for the candidate model during evaluation. + "temperature": 0.0 # Optional. Inference configuration for + the candidate model during evaluation. + }, + "candidate_model_name": "str", # Optional. Model Evaluation Run + Detail - full view returned when fetching a specific run. + "candidate_model_source": "CANDIDATE_MODEL_SOURCE_SERVERLESS", # + Optional. Default value is "CANDIDATE_MODEL_SOURCE_SERVERLESS". Whether + inference runs against the serverless platform, a dedicated deployment, or a + model router. Known values are: "CANDIDATE_MODEL_SOURCE_SERVERLESS", + "CANDIDATE_MODEL_SOURCE_DEDICATED", and "CANDIDATE_MODEL_SOURCE_ROUTER". + "candidate_model_uuid": "str", # Optional. Candidate model being + evaluated. + "completed_at": "2020-02-20 00:00:00", # Optional. Model Evaluation + Run Detail - full view returned when fetching a specific run. + "created_at": "2020-02-20 00:00:00", # Optional. Model Evaluation + Run Detail - full view returned when fetching a specific run. + "dataset_name": "str", # Optional. Model Evaluation Run Detail - + full view returned when fetching a specific run. + "dataset_uuid": "str", # Optional. Dataset used for the evaluation. + "error_description": "str", # Optional. Error description if the run + failed or partially succeeded. + "eval_preset_name": "str", # Optional. Model Evaluation Run Detail - + full view returned when fetching a specific run. + "eval_preset_uuid": "str", # Optional. Model Evaluation Run Detail - + full view returned when fetching a specific run. + "eval_run_uuid": "str", # Optional. UUID of the evaluation run. + "judge_model_name": "str", # Optional. 
Model Evaluation Run Detail - + full view returned when fetching a specific run. + "judge_model_uuid": "str", # Optional. Judge model used to score + responses. + "metrics": [ + { + "category": "METRIC_CATEGORY_UNSPECIFIED", # + Optional. Default value is "METRIC_CATEGORY_UNSPECIFIED". Known + values are: "METRIC_CATEGORY_UNSPECIFIED", + "METRIC_CATEGORY_CORRECTNESS", "METRIC_CATEGORY_USER_OUTCOMES", + "METRIC_CATEGORY_SAFETY_AND_SECURITY", + "METRIC_CATEGORY_CONTEXT_QUALITY", and "METRIC_CATEGORY_MODEL_FIT". + "description": "str", # Optional. Metrics selected + for this evaluation. + "evaluation_scope": "EVALUATION_SCOPE_UNSPECIFIED", + # Optional. Default value is "EVALUATION_SCOPE_UNSPECIFIED". Scope + that determines whether a metric belongs to agent evaluation or model + evaluation. For backwards compatibility, UNSPECIFIED defaults to + agent metrics only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". + "inverted": bool, # Optional. If true, the metric is + inverted, meaning that a lower value is better. + "is_metric_goal": bool, # Optional. Metrics selected + for this evaluation. + "metric_name": "str", # Optional. Metrics selected + for this evaluation. + "metric_rank": 0, # Optional. Metrics selected for + this evaluation. + "metric_type": "METRIC_TYPE_UNSPECIFIED", # + Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values + are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", and + "METRIC_TYPE_MODEL_SAFETY". + "metric_uuid": "str", # Optional. Metrics selected + for this evaluation. + "metric_value_type": "METRIC_VALUE_TYPE_UNSPECIFIED", + # Optional. Default value is "METRIC_VALUE_TYPE_UNSPECIFIED". Known + values are: "METRIC_VALUE_TYPE_UNSPECIFIED", + "METRIC_VALUE_TYPE_NUMBER", "METRIC_VALUE_TYPE_STRING", and + "METRIC_VALUE_TYPE_PERCENTAGE". + "range_max": 0.0, # Optional. 
The maximum value for + the metric. + "range_min": 0.0 # Optional. The minimum value for + the metric. + } + ], + "name": "str", # Optional. Name of the evaluation run. + "result_summary": { + "end_time": "2020-02-20 00:00:00", # Optional. Aggregated + result summary for a completed model evaluation run. + "metric_summaries": [ + { + "description": "str", # Optional. Per-metric + aggregated pass/fail statistics. + "fail_percent": 0.0, # Optional. Per-metric + aggregated pass/fail statistics. + "metric_name": "str", # Optional. Per-metric + aggregated pass/fail statistics. + "metric_uuid": "str", # Optional. Per-metric + aggregated pass/fail statistics. + "pass_percent": 0.0 # Optional. Per-metric + aggregated pass/fail statistics. + } + ], + "overall_score_percent": 0.0, # Optional. Aggregated result + summary for a completed model evaluation run. + "per_model_summaries": { + "summaries": [ + { + "metric_summaries": [ + { + "description": "str", + # Optional. Per-metric pass/fail for only this + model's prompts. + "fail_percent": 0.0, + # Optional. Per-metric pass/fail for only this + model's prompts. + "metric_name": "str", + # Optional. Per-metric pass/fail for only this + model's prompts. + "metric_uuid": "str", + # Optional. Per-metric pass/fail for only this + model's prompts. + "pass_percent": 0.0 + # Optional. Per-metric pass/fail for only this + model's prompts. + } + ], + "model_name": "str", # Optional. + Name/slug of the model (matches routed_model from results). + "performance_metrics": { + "candidate_latency": { + "avg_e2e_latency_ms": + 0.0, # Optional. Average end-to-end latency across + all invocations. + "max_e2e_latency_ms": + 0.0, # Optional. Maximum end-to-end latency + observed. + "min_e2e_latency_ms": + 0.0, # Optional. Minimum end-to-end latency + observed. + "p50_latency_ms": + 0.0, # Optional. P50 (median) latency. + "p90_latency_ms": + 0.0, # Optional. P90 latency. + "p95_latency_ms": 0.0 + # Optional. P95 latency. 
+ }, + "token_usage": { + "total_candidate_input_tokens": "str", # Optional. + All performance metrics are for the candidate model + unless noted otherwise. + "total_candidate_output_tokens": "str", # Optional. + All performance metrics are for the candidate model + unless noted otherwise. + "total_candidate_tokens": "str", # Optional. All + performance metrics are for the candidate model + unless noted otherwise. + "total_judge_input_tokens": "str", # Optional. All + performance metrics are for the candidate model + unless noted otherwise. + "total_judge_output_tokens": "str", # Optional. All + performance metrics are for the candidate model + unless noted otherwise. + "total_judge_tokens": + "str" # Optional. All performance metrics are for + the candidate model unless noted otherwise. + } + }, + "prompt_count": 0 # Optional. Number + of prompts routed to this model. + } + ] + }, + "performance_metrics": { + "candidate_latency": { + "avg_e2e_latency_ms": 0.0, # Optional. + Average end-to-end latency across all invocations. + "max_e2e_latency_ms": 0.0, # Optional. + Maximum end-to-end latency observed. + "min_e2e_latency_ms": 0.0, # Optional. + Minimum end-to-end latency observed. + "p50_latency_ms": 0.0, # Optional. P50 + (median) latency. + "p90_latency_ms": 0.0, # Optional. P90 + latency. + "p95_latency_ms": 0.0 # Optional. P95 + latency. + }, + "token_usage": { + "total_candidate_input_tokens": "str", # + Optional. All performance metrics are for the candidate model + unless noted otherwise. + "total_candidate_output_tokens": "str", # + Optional. All performance metrics are for the candidate model + unless noted otherwise. + "total_candidate_tokens": "str", # Optional. + All performance metrics are for the candidate model unless noted + otherwise. + "total_judge_input_tokens": "str", # + Optional. All performance metrics are for the candidate model + unless noted otherwise. + "total_judge_output_tokens": "str", # + Optional. 
All performance metrics are for the candidate model + unless noted otherwise. + "total_judge_tokens": "str" # Optional. All + performance metrics are for the candidate model unless noted + otherwise. + } + }, + "pricing": { + "currency": "str", # Optional. Currency code (e.g., + "USD"). + "judge_model_pricing": { + "input_cost": 0.0, # Optional. Cost of input + tokens. + "output_cost": 0.0, # Optional. Cost of + output tokens. + "total_cost": 0.0 # Optional. Total cost + (input + output). + }, + "per_candidate_model_pricing": [ + { + "model_name": "str", # Optional. + Model name (for display purposes). + "model_uuid": "str", # Optional. + Model UUID. + "pricing": { + "input_cost": 0.0, # + Optional. Cost of input tokens. + "output_cost": 0.0, # + Optional. Cost of output tokens. + "total_cost": 0.0 # + Optional. Total cost (input + output). + }, + "prompt_count": 0 # Optional. Number + of prompts/rows routed to this model. + } + ], + "total_cost": 0.0 # Optional. Total cost of the + evaluation run (all candidates + judge). + }, + "star_metric_summary": { + "metric_name": "str", # Optional. Star metric + summary with identifying details and threshold. + "metric_uuid": "str", # Optional. Star metric + summary with identifying details and threshold. + "threshold": 0.0 # Optional. Star metric summary + with identifying details and threshold. + }, + "start_time": "2020-02-20 00:00:00", # Optional. Run timing. + "total_duration_seconds": 0 # Optional. Total wall-clock + duration in seconds. + }, + "star_metric": { + "metric_uuid": "str", # Optional. Model Evaluation Run + Detail - full view returned when fetching a specific run. + "name": "str", # Optional. Model Evaluation Run Detail - + full view returned when fetching a specific run. + "success_threshold": 0.0, # Optional. The success threshold + for the star metric. This is a value that the metric must reach to be + considered successful. + "success_threshold_pct": 0 # Optional. 
The success threshold + for the star metric. This is a percentage value between 0 and 100. + }, + "started_at": "2020-02-20 00:00:00", # Optional. Model Evaluation + Run Detail - full view returned when fetching a specific run. + "status": "MODEL_EVALUATION_RUN_STATUS_UNSPECIFIED" # Optional. + Default value is "MODEL_EVALUATION_RUN_STATUS_UNSPECIFIED". Model Evaluation + Run Statuses. Known values are: "MODEL_EVALUATION_RUN_STATUS_UNSPECIFIED", + "MODEL_EVALUATION_RUN_QUEUED", "MODEL_EVALUATION_RUN_RUNNING_DATASET", + "MODEL_EVALUATION_RUN_EVALUATING_RESULTS", "MODEL_EVALUATION_RUN_CANCELLING", + "MODEL_EVALUATION_RUN_CANCELLED", "MODEL_EVALUATION_RUN_SUCCESSFUL", + "MODEL_EVALUATION_RUN_PARTIALLY_SUCCESSFUL", and + "MODEL_EVALUATION_RUN_FAILED". + } } # response body for status code(s): 404 response == { @@ -237284,8 +250362,125 @@ async def delete_knowledge_base(self, uuid: str, **kwargs: Any) -> JSON: cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_genai_delete_knowledge_base_request( - uuid=uuid, + _request = build_genai_get_model_evaluation_run_request( + eval_run_uuid=eval_run_uuid, + page=page, + per_page=per_page, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + 
"int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace_async + async def get_model_evaluation_run_results_download_url( # pylint: disable=name-too-long + self, eval_run_uuid: str, **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Get Download URL for Model Evaluation Run Results. + + To get a presigned download URL for model evaluation run results (gzip-compressed JSON), send a + GET request to ``/v2/genai/model_evaluation_runs/{eval_run_uuid}/results/download_url``. + + :param eval_run_uuid: UUID of the evaluation run. Required. + :type eval_run_uuid: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "download_url": "str", # Optional. The presigned URL to download the + gzip-compressed JSON results file (.json.gz). + "expires_at": "2020-02-20 00:00:00" # Optional. The time the URL expires at. + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. 
For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_genai_get_model_evaluation_run_results_download_url_request( + eval_run_uuid=eval_run_uuid, headers=_headers, params=_params, ) @@ -237407,8 +250602,28 @@ async def list_models( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. Model capabilities (inference, + reasoning, vectorization, etc.). + ], + "context_window": "str", # Optional. Context window (maximum + tokens). "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "description": "str", # Optional. Model description. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "id": "str", # Optional. Human-readable model identifier. "is_foundational": bool, # Optional. 
True if it is a foundational model provided by do. @@ -237418,9 +250633,61 @@ async def list_models( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Display name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. + "pricing": { + "input_price_per_million": 0.0, # Optional. Pricing + per million tokens (aligns with existing ModelPrice pattern). + "output_price_per_million": 0.0 # Optional. Pricing + per million tokens (aligns with existing ModelPrice pattern). + }, + "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. + Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: + "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and + "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". 
+ } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -238805,8 +252072,27 @@ async def list_openai_api_keys( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -238820,16 +252106,64 @@ async def list_openai_api_keys( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". 
Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -239010,211 +252344,358 @@ async def create_openai_api_key( "uuid": "str" # Optional. Agreement Description. }, - "created_at": "2020-02-20 00:00:00", # Optional. - Creation date / time. - "inference_name": "str", # Optional. Internally used - name. - "inference_version": "str", # Optional. Internally - used version. - "is_foundational": bool, # Optional. True if it is a - foundational model provided by do. - "kb_default_chunk_size": 0, # Optional. Default - chunking size limit to show in UI. - "kb_max_chunk_size": 0, # Optional. Maximum chunk - size limit of model. - "kb_min_chunk_size": 0, # Optional. Minimum chunking - size token limits if model supports KNOWLEDGEBASE usecase. - "metadata": {}, # Optional. Additional meta data. - "name": "str", # Optional. 
Name of the model. - "parent_uuid": "str", # Optional. Unique id of the - model, this model is based on. - "provider": "MODEL_PROVIDER_DIGITALOCEAN", # - Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known - values are: "MODEL_PROVIDER_DIGITALOCEAN", - "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". - "updated_at": "2020-02-20 00:00:00", # Optional. - Last modified. - "upload_complete": bool, # Optional. Model has been - fully uploaded. - "url": "str", # Optional. Download url. - "usecases": [ - "str" # Optional. Usecases of the model. + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. + "created_at": "2020-02-20 00:00:00", # Optional. + Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], + "inference_name": "str", # Optional. Internally used + name. + "inference_version": "str", # Optional. Internally + used version. + "is_foundational": bool, # Optional. True if it is a + foundational model provided by do. + "kb_default_chunk_size": 0, # Optional. Default + chunking size limit to show in UI. + "kb_max_chunk_size": 0, # Optional. Maximum chunk + size limit of model. + "kb_min_chunk_size": 0, # Optional. Minimum chunking + size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). + "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. 
+ ] + }, + "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. + "parent_uuid": "str", # Optional. Unique id of the + model, this model is based on. + "provider": "MODEL_PROVIDER_DIGITALOCEAN", # + Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known + values are: "MODEL_PROVIDER_DIGITALOCEAN", + "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). + "updated_at": "2020-02-20 00:00:00", # Optional. + Last modified. + "upload_complete": bool, # Optional. Model has been + fully uploaded. + "url": "str", # Optional. Download url. + "usecases": [ + "str" # Optional. Usecases of the model. + ], + "uuid": "str", # Optional. Unique id. + "version": { + "major": 0, # Optional. Major version + number. + "minor": 0, # Optional. Minor version + number. + "patch": 0 # Optional. Patch version number. + } + } + ], + "name": "str", # Optional. Name. + "updated_at": "2020-02-20 00:00:00", # Optional. Key last updated + date. 
+ "uuid": "str" # Optional. Uuid. + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + async def create_openai_api_key( + self, + body: Optional[IO[bytes]] = None, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Create OpenAI API Key. + + To create an OpenAI API key, send a POST request to ``/v2/gen-ai/openai/keys``. + + :param body: Default value is None. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "api_key_info": { + "created_at": "2020-02-20 00:00:00", # Optional. Key creation date. + "created_by": "str", # Optional. Created by user id from DO. + "deleted_at": "2020-02-20 00:00:00", # Optional. Key deleted date. + "models": [ + { + "agreement": { + "description": "str", # Optional. Agreement + Description. + "name": "str", # Optional. Agreement + Description. + "url": "str", # Optional. Agreement + Description. + "uuid": "str" # Optional. Agreement + Description. + }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. 
High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. + "created_at": "2020-02-20 00:00:00", # Optional. + Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], + "inference_name": "str", # Optional. Internally used + name. + "inference_version": "str", # Optional. Internally + used version. + "is_foundational": bool, # Optional. True if it is a + foundational model provided by do. + "kb_default_chunk_size": 0, # Optional. Default + chunking size limit to show in UI. + "kb_max_chunk_size": 0, # Optional. Maximum chunk + size limit of model. + "kb_min_chunk_size": 0, # Optional. Minimum chunking + size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). + "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, + "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. + "parent_uuid": "str", # Optional. Unique id of the + model, this model is based on. + "provider": "MODEL_PROVIDER_DIGITALOCEAN", # + Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known + values are: "MODEL_PROVIDER_DIGITALOCEAN", + "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. 
+ Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). + "updated_at": "2020-02-20 00:00:00", # Optional. + Last modified. + "upload_complete": bool, # Optional. Model has been + fully uploaded. + "url": "str", # Optional. Download url. + "usecases": [ + "str" # Optional. Usecases of the model. + ], + "uuid": "str", # Optional. Unique id. + "version": { + "major": 0, # Optional. Major version + number. + "minor": 0, # Optional. Minor version + number. + "patch": 0 # Optional. Patch version number. + } + } + ], + "name": "str", # Optional. Name. + "updated_at": "2020-02-20 00:00:00", # Optional. Key last updated + date. + "uuid": "str" # Optional. Uuid. + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + + @distributed_trace_async + async def create_openai_api_key( + self, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Create OpenAI API Key. + + To create an OpenAI API key, send a POST request to ``/v2/gen-ai/openai/keys``. + + :param body: Is either a JSON type or a IO[bytes] type. Default value is None. + :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "api_key": "str", # Optional. OpenAI API key. + "name": "str" # Optional. Name of the key. + } + + # response body for status code(s): 200 + response == { + "api_key_info": { + "created_at": "2020-02-20 00:00:00", # Optional. Key creation date. + "created_by": "str", # Optional. Created by user id from DO. + "deleted_at": "2020-02-20 00:00:00", # Optional. Key deleted date. + "models": [ + { + "agreement": { + "description": "str", # Optional. Agreement + Description. + "name": "str", # Optional. Agreement + Description. + "url": "str", # Optional. Agreement + Description. + "uuid": "str" # Optional. Agreement + Description. + }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). ], - "uuid": "str", # Optional. Unique id. - "version": { - "major": 0, # Optional. Major version - number. - "minor": 0, # Optional. Minor version - number. - "patch": 0 # Optional. Patch version number. - } - } - ], - "name": "str", # Optional. Name. - "updated_at": "2020-02-20 00:00:00", # Optional. Key last updated - date. - "uuid": "str" # Optional. Uuid. - } - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. 
For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @overload - async def create_openai_api_key( - self, - body: Optional[IO[bytes]] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> JSON: - # pylint: disable=line-too-long - """Create OpenAI API Key. - - To create an OpenAI API key, send a POST request to ``/v2/gen-ai/openai/keys``. - - :param body: Default value is None. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "api_key_info": { - "created_at": "2020-02-20 00:00:00", # Optional. Key creation date. - "created_by": "str", # Optional. Created by user id from DO. - "deleted_at": "2020-02-20 00:00:00", # Optional. Key deleted date. - "models": [ - { - "agreement": { - "description": "str", # Optional. Agreement - Description. - "name": "str", # Optional. Agreement - Description. - "url": "str", # Optional. Agreement - Description. - "uuid": "str" # Optional. Agreement - Description. - }, + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. - "inference_name": "str", # Optional. Internally used - name. - "inference_version": "str", # Optional. Internally - used version. - "is_foundational": bool, # Optional. 
True if it is a - foundational model provided by do. - "kb_default_chunk_size": 0, # Optional. Default - chunking size limit to show in UI. - "kb_max_chunk_size": 0, # Optional. Maximum chunk - size limit of model. - "kb_min_chunk_size": 0, # Optional. Minimum chunking - size token limits if model supports KNOWLEDGEBASE usecase. - "metadata": {}, # Optional. Additional meta data. - "name": "str", # Optional. Name of the model. - "parent_uuid": "str", # Optional. Unique id of the - model, this model is based on. - "provider": "MODEL_PROVIDER_DIGITALOCEAN", # - Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known - values are: "MODEL_PROVIDER_DIGITALOCEAN", - "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". - "updated_at": "2020-02-20 00:00:00", # Optional. - Last modified. - "upload_complete": bool, # Optional. Model has been - fully uploaded. - "url": "str", # Optional. Download url. - "usecases": [ - "str" # Optional. Usecases of the model. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } ], - "uuid": "str", # Optional. Unique id. - "version": { - "major": 0, # Optional. Major version - number. - "minor": 0, # Optional. Minor version - number. - "patch": 0 # Optional. Patch version number. - } - } - ], - "name": "str", # Optional. Name. - "updated_at": "2020-02-20 00:00:00", # Optional. Key last updated - date. - "uuid": "str" # Optional. Uuid. - } - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. 
- "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @distributed_trace_async - async def create_openai_api_key( - self, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any - ) -> JSON: - # pylint: disable=line-too-long - """Create OpenAI API Key. - - To create an OpenAI API key, send a POST request to ``/v2/gen-ai/openai/keys``. - - :param body: Is either a JSON type or a IO[bytes] type. Default value is None. - :type body: JSON or IO[bytes] - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "api_key": "str", # Optional. OpenAI API key. - "name": "str" # Optional. Name of the key. - } - - # response body for status code(s): 200 - response == { - "api_key_info": { - "created_at": "2020-02-20 00:00:00", # Optional. Key creation date. - "created_by": "str", # Optional. Created by user id from DO. - "deleted_at": "2020-02-20 00:00:00", # Optional. Key deleted date. - "models": [ - { - "agreement": { - "description": "str", # Optional. Agreement - Description. - "name": "str", # Optional. Agreement - Description. - "url": "str", # Optional. Agreement - Description. - "uuid": "str" # Optional. Agreement - Description. - }, - "created_at": "2020-02-20 00:00:00", # Optional. - Creation date / time. "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -239227,14 +252708,59 @@ async def create_openai_api_key( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. 
Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -239400,8 +252926,27 @@ async def get_openai_api_key(self, api_key_uuid: str, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. 
High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -239414,14 +252959,59 @@ async def get_openai_api_key(self, api_key_uuid: str, **kwargs: Any) -> JSON: size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). 
+ "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -239589,8 +253179,27 @@ async def update_openai_api_key( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -239603,14 +253212,59 @@ async def update_openai_api_key( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. 
Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -239694,8 +253348,27 @@ async def update_openai_api_key( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. 
Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -239708,14 +253381,59 @@ async def update_openai_api_key( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. 
Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -239801,8 +253519,27 @@ async def update_openai_api_key( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -239815,14 +253552,59 @@ async def update_openai_api_key( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. 
"provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -239989,8 +253771,27 @@ async def delete_openai_api_key(self, api_key_uuid: str, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). 
+ ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -240003,14 +253804,59 @@ async def delete_openai_api_key(self, api_key_uuid: str, **kwargs: Any) -> JSON: size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". 
+ } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -240408,6 +254254,12 @@ async def list_agents_by_openai_key( "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -240435,6 +254287,24 @@ async def list_agents_by_openai_key( stream. }, "max_tokens": 0, # Optional. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of + allowed tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional + additional headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of + the MCP server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -240446,8 +254316,27 @@ async def list_agents_by_openai_key( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). 
+ ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -240460,14 +254349,59 @@ async def list_agents_by_openai_key( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. 
Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -240505,8 +254439,27 @@ async def list_agents_by_openai_key( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. + Benchmark scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. + Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # + Optional. Capabilities supported by this endpoint + (e.g. input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # @@ -240520,10 +254473,25 @@ async def list_agents_by_openai_key( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -240531,6 +254499,42 @@ async def list_agents_by_openai_key( "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". 
+ "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": + "str", # Optional. String default value (for + type="dropdown", e.g. "medium"). + "default_value": 0.0, + # Optional. Numeric default value (for + type="number"). + "max": 0.0, # + Optional. Maximum allowed value (for type="number"). + "min": 0.0, # + Optional. Minimum allowed value (for type="number"). + "name": "str", # + Optional. Setting key name (e.g. "max_tokens", + "temperature", "resolution"). + "options": [ + "str" # + Optional. Allowed values for dropdown selections + (for type="dropdown"). + ], + "step": 0.0, # + Optional. Step increment for numeric settings (for + type="number"). + "type": "str" # + Optional. Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. + Whether this model supports extended thinking (Anthropic + models). + "type": "str", # Optional. Model + type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. @@ -240561,6 +254565,45 @@ async def list_agents_by_openai_key( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level + fallback models. + ], + "policies": [ + { + "custom_task": { + "description": "str", + # Optional. Short task description. + "name": "str" # + Optional. Task name. + }, + "models": [ + "str" # Optional. + Models assigned to the task. + ], + "selection_policy": { + "prefer": "str" # + Optional. One of: none, cheapest, fastest. + }, + "task_slug": "str" # + Optional. Task slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. + Creation date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the + router. 
+ ], + "updated_at": "2020-02-20 00:00:00", # Optional. + Last modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -240581,8 +254624,27 @@ async def list_agents_by_openai_key( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. + Benchmark scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. + Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # + Optional. Capabilities supported by this endpoint + (e.g. input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # @@ -240596,10 +254658,25 @@ async def list_agents_by_openai_key( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -240607,6 +254684,42 @@ async def list_agents_by_openai_key( "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. 
Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": + "str", # Optional. String default value (for + type="dropdown", e.g. "medium"). + "default_value": 0.0, + # Optional. Numeric default value (for + type="number"). + "max": 0.0, # + Optional. Maximum allowed value (for type="number"). + "min": 0.0, # + Optional. Minimum allowed value (for type="number"). + "name": "str", # + Optional. Setting key name (e.g. "max_tokens", + "temperature", "resolution"). + "options": [ + "str" # + Optional. Allowed values for dropdown selections + (for type="dropdown"). + ], + "step": 0.0, # + Optional. Step increment for numeric settings (for + type="number"). + "type": "str" # + Optional. Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. + Whether this model supports extended thinking (Anthropic + models). + "type": "str", # Optional. Model + type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. @@ -240640,6 +254753,8 @@ async def list_agents_by_openai_key( "project_id": "str", # Optional. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort + for the agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: @@ -240787,6 +254902,12 @@ async def list_agents_by_openai_key( List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. + Whether reranking is enabled for retrieval. + "model": "str" # Optional. + Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -240814,8 +254935,27 @@ async def list_agents_by_openai_key( "uuid": "str" # Optional. 
Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -240829,16 +254969,64 @@ async def list_agents_by_openai_key( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. 
+ Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -240882,6 +255070,8 @@ async def list_agents_by_openai_key( agent template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token + budget for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. @@ -240924,6 +255114,13 @@ async def list_agents_by_openai_key( 00:00:00", # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default + value is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values + are: "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # @@ -240954,6 +255151,16 @@ async def list_agents_by_openai_key( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". 
Scope that + determines whether a metric belongs to agent + evaluation or model evaluation. For backwards + compatibility, UNSPECIFIED defaults to agent metrics + only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", + "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -240967,8 +255174,10 @@ async def list_agents_by_openai_key( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", - "METRIC_TYPE_GENERAL_QUALITY", and - "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_GENERAL_QUALITY", + "METRIC_TYPE_RAG_AND_TOOL", + "METRIC_TYPE_MODEL_QUALITY", and + "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -242138,6 +256347,12 @@ async def list_workspaces(self, **kwargs: Any) -> JSON: Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # + Optional. Whether reranking is enabled for retrieval. + "model": "str" # + Optional. Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -242165,6 +256380,27 @@ async def list_workspaces(self, **kwargs: Any) -> JSON: Name of the log stream. }, "max_tokens": 0, # Optional. Agents. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. + Optional list of allowed tool names to expose from + this server. + ], + "authorization": "str", # + Optional. Optional authorization header value for the MCP + server. + "headers": { + "str": "str" # + Optional. Optional additional headers to send to the + MCP server. + }, + "server_label": "str", # + Optional. A label identifying this MCP server. + "server_url": "str" # + Optional. The URL of the MCP server. 
+ } + ], "model": { "agreement": { "description": "str", # @@ -242176,8 +256412,27 @@ async def list_workspaces(self, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. + Benchmark scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. + Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # + Optional. Capabilities supported by this endpoint + (e.g. input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # @@ -242191,10 +256446,25 @@ async def list_workspaces(self, **kwargs: Any) -> JSON: "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -242202,6 +256472,42 @@ async def list_workspaces(self, **kwargs: Any) -> JSON: "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": + "str", # Optional. 
String default value (for + type="dropdown", e.g. "medium"). + "default_value": 0.0, + # Optional. Numeric default value (for + type="number"). + "max": 0.0, # + Optional. Maximum allowed value (for type="number"). + "min": 0.0, # + Optional. Minimum allowed value (for type="number"). + "name": "str", # + Optional. Setting key name (e.g. "max_tokens", + "temperature", "resolution"). + "options": [ + "str" # + Optional. Allowed values for dropdown selections + (for type="dropdown"). + ], + "step": 0.0, # + Optional. Step increment for numeric settings (for + type="number"). + "type": "str" # + Optional. Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. + Whether this model supports extended thinking (Anthropic + models). + "type": "str", # Optional. Model + type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. @@ -242244,9 +256550,30 @@ async def list_workspaces(self, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": + {}, # Optional. Benchmark scores for this model, + stored as arbitrary JSON. + "capabilities": [ + "str" # + Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": + "str", # Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities supported + by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": @@ -242261,10 +256588,26 @@ async def list_workspaces(self, **kwargs: Any) -> JSON: "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. 
+ "lifecycle_status": + "str", # Optional. Lifecycle status of the model + (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" + # Optional. Input/output modalities. + ], + "output": [ + "str" + # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": + 0.0, # Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. @@ -242274,6 +256617,43 @@ async def list_workspaces(self, **kwargs: Any) -> JSON: are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": + [ + "str" # + Optional. Available reasoning efforts for this + model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended + thinking (Anthropic models). + "type": "str", # + Optional. Model type (chat, embedding, image, + reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": @@ -242306,6 +256686,46 @@ async def list_workspaces(self, **kwargs: Any) -> JSON: "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. 
}, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. + Router-level fallback models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # Optional. Short task + description. + "name": "str" # Optional. Task name. + }, + "models": [ + "str" + # Optional. Models assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. One of: none, + cheapest, fastest. + }, + "task_slug": + "str" # Optional. Task slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", + # Optional. Creation date / time. + "description": "str", # Optional. + Description. + "name": "str", # Optional. Name of + the model router. + "regions": [ + "str" # Optional. Target + regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", + # Optional. Last modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", @@ -242326,9 +256746,30 @@ async def list_workspaces(self, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": + {}, # Optional. Benchmark scores for this model, + stored as arbitrary JSON. + "capabilities": [ + "str" # + Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": + "str", # Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities supported + by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": @@ -242343,10 +256784,26 @@ async def list_workspaces(self, **kwargs: Any) -> JSON: "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": + "str", # Optional. 
Lifecycle status of the model + (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" + # Optional. Input/output modalities. + ], + "output": [ + "str" + # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": + 0.0, # Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. @@ -242356,6 +256813,43 @@ async def list_workspaces(self, **kwargs: Any) -> JSON: are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": + [ + "str" # + Optional. Available reasoning efforts for this + model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended + thinking (Anthropic models). + "type": "str", # + Optional. Model type (chat, embedding, image, + reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": @@ -242389,6 +256883,8 @@ async def list_workspaces(self, **kwargs: Any) -> JSON: "project_id": "str", # Optional. Agents. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. 
+ "reasoning_effort": "str", # Optional. The + reasoning effort for the agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is @@ -242555,6 +257051,13 @@ async def list_workspaces(self, **kwargs: Any) -> JSON: the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": + bool, # Optional. Whether reranking is enabled + for retrieval. + "model": + "str" # Optional. Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -242582,8 +257085,31 @@ async def list_workspaces(self, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -242598,10 +257124,25 @@ async def list_workspaces(self, **kwargs: Any) -> JSON: "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. 
+ "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -242609,6 +257150,44 @@ async def list_workspaces(self, **kwargs: Any) -> JSON: is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -242655,6 +257234,9 @@ async def list_workspaces(self, **kwargs: Any) -> JSON: # Optional. The agent template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The + thinking token budget for Anthropic extended thinking (0 = + disabled). "top_p": 0.0, # Optional. Agents. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. @@ -242697,6 +257279,13 @@ async def list_workspaces(self, **kwargs: Any) -> JSON: # Optional. Time created at. 
"dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -242727,6 +257316,15 @@ async def list_workspaces(self, **kwargs: Any) -> JSON: "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -242740,7 +257338,8 @@ async def list_workspaces(self, **kwargs: Any) -> JSON: "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -243183,6 +257782,12 @@ async def create_workspace( Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. + Whether reranking is enabled for retrieval. + "model": "str" # Optional. + Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -243210,6 +257815,24 @@ async def create_workspace( the log stream. }, "max_tokens": 0, # Optional. 
Agents. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional + list of allowed tool names to expose from this server. + ], + "authorization": "str", # Optional. + Optional authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. + Optional additional headers to send to the MCP server. + }, + "server_label": "str", # Optional. A + label identifying this MCP server. + "server_url": "str" # Optional. The + URL of the MCP server. + } + ], "model": { "agreement": { "description": "str", # Optional. @@ -243221,8 +257844,27 @@ async def create_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -243236,16 +257878,64 @@ async def create_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. 
"parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -243286,8 +257976,31 @@ async def create_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. Capabilities supported by this + endpoint (e.g. 
input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -243302,10 +258015,25 @@ async def create_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -243313,6 +258041,44 @@ async def create_workspace( is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. 
Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -243343,6 +258109,48 @@ async def create_workspace( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. + Router-level fallback models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # Optional. Short task + description. + "name": "str" + # Optional. Task name. + }, + "models": [ + "str" # + Optional. Models assigned to the task. + ], + "selection_policy": { + "prefer": + "str" # Optional. One of: none, cheapest, + fastest. + }, + "task_slug": "str" # + Optional. Task slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # + Optional. Creation date / time. + "description": "str", # Optional. + Description. + "name": "str", # Optional. Name of the model + router. + "regions": [ + "str" # Optional. Target regions for + the router. + ], + "updated_at": "2020-02-20 00:00:00", # + Optional. Last modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # @@ -243363,8 +258171,31 @@ async def create_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. 
The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -243379,10 +258210,25 @@ async def create_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -243390,6 +258236,44 @@ async def create_workspace( is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended thinking + (Anthropic models). 
+ "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -243423,6 +258307,8 @@ async def create_workspace( "project_id": "str", # Optional. Agents. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning + effort for the agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * @@ -243581,6 +258467,12 @@ async def create_workspace( agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # + Optional. Whether reranking is enabled for retrieval. + "model": "str" # + Optional. Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -243608,8 +258500,27 @@ async def create_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. + Benchmark scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. + Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # + Optional. Capabilities supported by this endpoint + (e.g. input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # @@ -243623,10 +258534,25 @@ async def create_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. 
Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -243634,6 +258560,42 @@ async def create_workspace( "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": + "str", # Optional. String default value (for + type="dropdown", e.g. "medium"). + "default_value": 0.0, + # Optional. Numeric default value (for + type="number"). + "max": 0.0, # + Optional. Maximum allowed value (for type="number"). + "min": 0.0, # + Optional. Minimum allowed value (for type="number"). + "name": "str", # + Optional. Setting key name (e.g. "max_tokens", + "temperature", "resolution"). + "options": [ + "str" # + Optional. Allowed values for dropdown selections + (for type="dropdown"). + ], + "step": 0.0, # + Optional. Step increment for numeric settings (for + type="number"). + "type": "str" # + Optional. Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. + Whether this model supports extended thinking (Anthropic + models). + "type": "str", # Optional. Model + type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. @@ -243680,6 +258642,8 @@ async def create_workspace( Optional. The agent template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. 
The thinking + token budget for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. Agents. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. @@ -243719,6 +258683,12 @@ async def create_workspace( Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value is + "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The size of @@ -243745,6 +258715,14 @@ async def create_workspace( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default value is + "EVALUATION_SCOPE_UNSPECIFIED". Scope that determines whether + a metric belongs to agent evaluation or model evaluation. For + backwards compatibility, UNSPECIFIED defaults to agent + metrics only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -243757,8 +258735,9 @@ async def create_workspace( "metric_type": "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: - "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", and - "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", and + "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -244111,6 +259090,12 @@ async def create_workspace( Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. 
+ Whether reranking is enabled for retrieval. + "model": "str" # Optional. + Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -244138,6 +259123,24 @@ async def create_workspace( the log stream. }, "max_tokens": 0, # Optional. Agents. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional + list of allowed tool names to expose from this server. + ], + "authorization": "str", # Optional. + Optional authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. + Optional additional headers to send to the MCP server. + }, + "server_label": "str", # Optional. A + label identifying this MCP server. + "server_url": "str" # Optional. The + URL of the MCP server. + } + ], "model": { "agreement": { "description": "str", # Optional. @@ -244149,8 +259152,27 @@ async def create_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -244164,16 +259186,64 @@ async def create_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. 
+ "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -244214,8 +259284,31 @@ async def create_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). 
+ ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -244230,10 +259323,25 @@ async def create_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -244241,6 +259349,44 @@ async def create_workspace( is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. 
Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -244271,6 +259417,48 @@ async def create_workspace( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. + Router-level fallback models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # Optional. Short task + description. + "name": "str" + # Optional. Task name. + }, + "models": [ + "str" # + Optional. Models assigned to the task. + ], + "selection_policy": { + "prefer": + "str" # Optional. One of: none, cheapest, + fastest. + }, + "task_slug": "str" # + Optional. Task slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # + Optional. Creation date / time. + "description": "str", # Optional. + Description. + "name": "str", # Optional. Name of the model + router. + "regions": [ + "str" # Optional. Target regions for + the router. + ], + "updated_at": "2020-02-20 00:00:00", # + Optional. Last modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # @@ -244291,8 +259479,31 @@ async def create_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. 
"created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -244307,10 +259518,25 @@ async def create_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -244318,6 +259544,44 @@ async def create_workspace( is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. 
Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -244351,6 +259615,8 @@ async def create_workspace( "project_id": "str", # Optional. Agents. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning + effort for the agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * @@ -244509,6 +259775,12 @@ async def create_workspace( agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # + Optional. Whether reranking is enabled for retrieval. + "model": "str" # + Optional. Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -244536,8 +259808,27 @@ async def create_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. + Benchmark scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. + Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # + Optional. Capabilities supported by this endpoint + (e.g. input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. 
"inference_version": "str", # @@ -244551,10 +259842,25 @@ async def create_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -244562,6 +259868,42 @@ async def create_workspace( "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": + "str", # Optional. String default value (for + type="dropdown", e.g. "medium"). + "default_value": 0.0, + # Optional. Numeric default value (for + type="number"). + "max": 0.0, # + Optional. Maximum allowed value (for type="number"). + "min": 0.0, # + Optional. Minimum allowed value (for type="number"). + "name": "str", # + Optional. Setting key name (e.g. "max_tokens", + "temperature", "resolution"). + "options": [ + "str" # + Optional. Allowed values for dropdown selections + (for type="dropdown"). + ], + "step": 0.0, # + Optional. Step increment for numeric settings (for + type="number"). + "type": "str" # + Optional. Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. + Whether this model supports extended thinking (Anthropic + models). + "type": "str", # Optional. Model + type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. 
Last modified. "upload_complete": bool, # Optional. @@ -244608,6 +259950,8 @@ async def create_workspace( Optional. The agent template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking + token budget for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. Agents. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. @@ -244647,6 +259991,12 @@ async def create_workspace( Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value is + "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The size of @@ -244673,6 +260023,14 @@ async def create_workspace( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default value is + "EVALUATION_SCOPE_UNSPECIFIED". Scope that determines whether + a metric belongs to agent evaluation or model evaluation. For + backwards compatibility, UNSPECIFIED defaults to agent + metrics only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -244685,8 +260043,9 @@ async def create_workspace( "metric_type": "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: - "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", and - "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", and + "METRIC_TYPE_MODEL_SAFETY". 
"metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -245041,6 +260400,12 @@ async def create_workspace( Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. + Whether reranking is enabled for retrieval. + "model": "str" # Optional. + Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -245068,6 +260433,24 @@ async def create_workspace( the log stream. }, "max_tokens": 0, # Optional. Agents. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional + list of allowed tool names to expose from this server. + ], + "authorization": "str", # Optional. + Optional authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. + Optional additional headers to send to the MCP server. + }, + "server_label": "str", # Optional. A + label identifying this MCP server. + "server_url": "str" # Optional. The + URL of the MCP server. + } + ], "model": { "agreement": { "description": "str", # Optional. @@ -245079,8 +260462,27 @@ async def create_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -245094,16 +260496,64 @@ async def create_workspace( "kb_min_chunk_size": 0, # Optional. 
Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. 
Model @@ -245144,8 +260594,31 @@ async def create_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -245160,10 +260633,25 @@ async def create_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -245171,6 +260659,44 @@ async def create_workspace( is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. 
Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -245201,6 +260727,48 @@ async def create_workspace( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. + Router-level fallback models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # Optional. Short task + description. + "name": "str" + # Optional. Task name. + }, + "models": [ + "str" # + Optional. Models assigned to the task. + ], + "selection_policy": { + "prefer": + "str" # Optional. One of: none, cheapest, + fastest. + }, + "task_slug": "str" # + Optional. Task slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # + Optional. Creation date / time. + "description": "str", # Optional. + Description. + "name": "str", # Optional. Name of the model + router. + "regions": [ + "str" # Optional. Target regions for + the router. + ], + "updated_at": "2020-02-20 00:00:00", # + Optional. Last modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. 
"openai_api_key": { "created_at": "2020-02-20 00:00:00", # @@ -245221,8 +260789,31 @@ async def create_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -245237,10 +260828,25 @@ async def create_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -245248,6 +260854,44 @@ async def create_workspace( is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). 
+ "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -245281,6 +260925,8 @@ async def create_workspace( "project_id": "str", # Optional. Agents. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning + effort for the agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * @@ -245439,6 +261085,12 @@ async def create_workspace( agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # + Optional. Whether reranking is enabled for retrieval. + "model": "str" # + Optional. Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -245466,8 +261118,27 @@ async def create_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. + Benchmark scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). 
+ ], + "context_window": "str", # Optional. + Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # + Optional. Capabilities supported by this endpoint + (e.g. input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # @@ -245481,10 +261152,25 @@ async def create_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -245492,6 +261178,42 @@ async def create_workspace( "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": + "str", # Optional. String default value (for + type="dropdown", e.g. "medium"). + "default_value": 0.0, + # Optional. Numeric default value (for + type="number"). + "max": 0.0, # + Optional. Maximum allowed value (for type="number"). + "min": 0.0, # + Optional. Minimum allowed value (for type="number"). + "name": "str", # + Optional. Setting key name (e.g. "max_tokens", + "temperature", "resolution"). + "options": [ + "str" # + Optional. 
Allowed values for dropdown selections + (for type="dropdown"). + ], + "step": 0.0, # + Optional. Step increment for numeric settings (for + type="number"). + "type": "str" # + Optional. Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. + Whether this model supports extended thinking (Anthropic + models). + "type": "str", # Optional. Model + type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. @@ -245538,6 +261260,8 @@ async def create_workspace( Optional. The agent template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking + token budget for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. Agents. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. @@ -245577,6 +261301,12 @@ async def create_workspace( Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value is + "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The size of @@ -245603,6 +261333,14 @@ async def create_workspace( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default value is + "EVALUATION_SCOPE_UNSPECIFIED". Scope that determines whether + a metric belongs to agent evaluation or model evaluation. For + backwards compatibility, UNSPECIFIED defaults to agent + metrics only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. 
If true, the metric is inverted, meaning that a lower value is better. @@ -245615,8 +261353,9 @@ async def create_workspace( "metric_type": "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: - "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", and - "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", and + "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -246053,6 +261792,12 @@ async def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. + Whether reranking is enabled for retrieval. + "model": "str" # Optional. + Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -246080,6 +261825,24 @@ async def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: the log stream. }, "max_tokens": 0, # Optional. Agents. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional + list of allowed tool names to expose from this server. + ], + "authorization": "str", # Optional. + Optional authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. + Optional additional headers to send to the MCP server. + }, + "server_label": "str", # Optional. A + label identifying this MCP server. + "server_url": "str" # Optional. The + URL of the MCP server. + } + ], "model": { "agreement": { "description": "str", # Optional. @@ -246091,8 +261854,27 @@ async def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. 
tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -246106,16 +261888,64 @@ async def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. 
"max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -246156,8 +261986,31 @@ async def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -246172,10 +262025,25 @@ async def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. 
Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -246183,6 +262051,44 @@ async def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -246213,6 +262119,48 @@ async def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. + Router-level fallback models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # Optional. Short task + description. 
+ "name": "str" + # Optional. Task name. + }, + "models": [ + "str" # + Optional. Models assigned to the task. + ], + "selection_policy": { + "prefer": + "str" # Optional. One of: none, cheapest, + fastest. + }, + "task_slug": "str" # + Optional. Task slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # + Optional. Creation date / time. + "description": "str", # Optional. + Description. + "name": "str", # Optional. Name of the model + router. + "regions": [ + "str" # Optional. Target regions for + the router. + ], + "updated_at": "2020-02-20 00:00:00", # + Optional. Last modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # @@ -246233,8 +262181,31 @@ async def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -246249,10 +262220,25 @@ async def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. 
+ "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -246260,6 +262246,44 @@ async def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -246293,6 +262317,8 @@ async def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: "project_id": "str", # Optional. Agents. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. 
The reasoning + effort for the agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * @@ -246451,6 +262477,12 @@ async def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # + Optional. Whether reranking is enabled for retrieval. + "model": "str" # + Optional. Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -246478,8 +262510,27 @@ async def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. + Benchmark scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. + Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # + Optional. Capabilities supported by this endpoint + (e.g. input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # @@ -246493,10 +262544,25 @@ async def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. 
Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -246504,6 +262570,42 @@ async def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": + "str", # Optional. String default value (for + type="dropdown", e.g. "medium"). + "default_value": 0.0, + # Optional. Numeric default value (for + type="number"). + "max": 0.0, # + Optional. Maximum allowed value (for type="number"). + "min": 0.0, # + Optional. Minimum allowed value (for type="number"). + "name": "str", # + Optional. Setting key name (e.g. "max_tokens", + "temperature", "resolution"). + "options": [ + "str" # + Optional. Allowed values for dropdown selections + (for type="dropdown"). + ], + "step": 0.0, # + Optional. Step increment for numeric settings (for + type="number"). + "type": "str" # + Optional. Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. + Whether this model supports extended thinking (Anthropic + models). + "type": "str", # Optional. Model + type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. @@ -246550,6 +262652,8 @@ async def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: Optional. The agent template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking + token budget for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. Agents. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. 
@@ -246589,6 +262693,12 @@ async def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value is + "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The size of @@ -246615,6 +262725,14 @@ async def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default value is + "EVALUATION_SCOPE_UNSPECIFIED". Scope that determines whether + a metric belongs to agent evaluation or model evaluation. For + backwards compatibility, UNSPECIFIED defaults to agent + metrics only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -246627,8 +262745,9 @@ async def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: "metric_type": "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: - "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", and - "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", and + "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -247068,6 +263187,12 @@ async def update_workspace( Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. 
+ Whether reranking is enabled for retrieval. + "model": "str" # Optional. + Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -247095,6 +263220,24 @@ async def update_workspace( the log stream. }, "max_tokens": 0, # Optional. Agents. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional + list of allowed tool names to expose from this server. + ], + "authorization": "str", # Optional. + Optional authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. + Optional additional headers to send to the MCP server. + }, + "server_label": "str", # Optional. A + label identifying this MCP server. + "server_url": "str" # Optional. The + URL of the MCP server. + } + ], "model": { "agreement": { "description": "str", # Optional. @@ -247106,8 +263249,27 @@ async def update_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -247121,16 +263283,64 @@ async def update_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. 
+ "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -247171,8 +263381,31 @@ async def update_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). 
+ ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -247187,10 +263420,25 @@ async def update_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -247198,6 +263446,44 @@ async def update_workspace( is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. 
Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -247228,6 +263514,48 @@ async def update_workspace( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. + Router-level fallback models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # Optional. Short task + description. + "name": "str" + # Optional. Task name. + }, + "models": [ + "str" # + Optional. Models assigned to the task. + ], + "selection_policy": { + "prefer": + "str" # Optional. One of: none, cheapest, + fastest. + }, + "task_slug": "str" # + Optional. Task slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # + Optional. Creation date / time. + "description": "str", # Optional. + Description. + "name": "str", # Optional. Name of the model + router. + "regions": [ + "str" # Optional. Target regions for + the router. + ], + "updated_at": "2020-02-20 00:00:00", # + Optional. Last modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # @@ -247248,8 +263576,31 @@ async def update_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. 
"created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -247264,10 +263615,25 @@ async def update_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -247275,6 +263641,44 @@ async def update_workspace( is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. 
Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -247308,6 +263712,8 @@ async def update_workspace( "project_id": "str", # Optional. Agents. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning + effort for the agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * @@ -247466,6 +263872,12 @@ async def update_workspace( agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # + Optional. Whether reranking is enabled for retrieval. + "model": "str" # + Optional. Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -247493,8 +263905,27 @@ async def update_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. + Benchmark scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. + Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # + Optional. Capabilities supported by this endpoint + (e.g. input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. 
"inference_version": "str", # @@ -247508,10 +263939,25 @@ async def update_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -247519,6 +263965,42 @@ async def update_workspace( "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": + "str", # Optional. String default value (for + type="dropdown", e.g. "medium"). + "default_value": 0.0, + # Optional. Numeric default value (for + type="number"). + "max": 0.0, # + Optional. Maximum allowed value (for type="number"). + "min": 0.0, # + Optional. Minimum allowed value (for type="number"). + "name": "str", # + Optional. Setting key name (e.g. "max_tokens", + "temperature", "resolution"). + "options": [ + "str" # + Optional. Allowed values for dropdown selections + (for type="dropdown"). + ], + "step": 0.0, # + Optional. Step increment for numeric settings (for + type="number"). + "type": "str" # + Optional. Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. + Whether this model supports extended thinking (Anthropic + models). + "type": "str", # Optional. Model + type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. 
Last modified. "upload_complete": bool, # Optional. @@ -247565,6 +264047,8 @@ async def update_workspace( Optional. The agent template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking + token budget for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. Agents. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. @@ -247604,6 +264088,12 @@ async def update_workspace( Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value is + "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The size of @@ -247630,6 +264120,14 @@ async def update_workspace( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default value is + "EVALUATION_SCOPE_UNSPECIFIED". Scope that determines whether + a metric belongs to agent evaluation or model evaluation. For + backwards compatibility, UNSPECIFIED defaults to agent + metrics only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -247642,8 +264140,9 @@ async def update_workspace( "metric_type": "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: - "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", and - "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", and + "METRIC_TYPE_MODEL_SAFETY". 
"metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -247999,6 +264498,12 @@ async def update_workspace( Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. + Whether reranking is enabled for retrieval. + "model": "str" # Optional. + Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -248026,6 +264531,24 @@ async def update_workspace( the log stream. }, "max_tokens": 0, # Optional. Agents. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional + list of allowed tool names to expose from this server. + ], + "authorization": "str", # Optional. + Optional authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. + Optional additional headers to send to the MCP server. + }, + "server_label": "str", # Optional. A + label identifying this MCP server. + "server_url": "str" # Optional. The + URL of the MCP server. + } + ], "model": { "agreement": { "description": "str", # Optional. @@ -248037,8 +264560,27 @@ async def update_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -248052,16 +264594,64 @@ async def update_workspace( "kb_min_chunk_size": 0, # Optional. 
Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. 
Model @@ -248102,8 +264692,31 @@ async def update_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -248118,10 +264731,25 @@ async def update_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -248129,6 +264757,44 @@ async def update_workspace( is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. 
Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -248159,6 +264825,48 @@ async def update_workspace( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. + Router-level fallback models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # Optional. Short task + description. + "name": "str" + # Optional. Task name. + }, + "models": [ + "str" # + Optional. Models assigned to the task. + ], + "selection_policy": { + "prefer": + "str" # Optional. One of: none, cheapest, + fastest. + }, + "task_slug": "str" # + Optional. Task slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # + Optional. Creation date / time. + "description": "str", # Optional. + Description. + "name": "str", # Optional. Name of the model + router. + "regions": [ + "str" # Optional. Target regions for + the router. + ], + "updated_at": "2020-02-20 00:00:00", # + Optional. Last modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. 
"openai_api_key": { "created_at": "2020-02-20 00:00:00", # @@ -248179,8 +264887,31 @@ async def update_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -248195,10 +264926,25 @@ async def update_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -248206,6 +264952,44 @@ async def update_workspace( is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). 
+ "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -248239,6 +265023,8 @@ async def update_workspace( "project_id": "str", # Optional. Agents. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning + effort for the agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * @@ -248397,6 +265183,12 @@ async def update_workspace( agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # + Optional. Whether reranking is enabled for retrieval. + "model": "str" # + Optional. Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -248424,8 +265216,27 @@ async def update_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. + Benchmark scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). 
+ ], + "context_window": "str", # Optional. + Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # + Optional. Capabilities supported by this endpoint + (e.g. input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # @@ -248439,10 +265250,25 @@ async def update_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -248450,6 +265276,42 @@ async def update_workspace( "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": + "str", # Optional. String default value (for + type="dropdown", e.g. "medium"). + "default_value": 0.0, + # Optional. Numeric default value (for + type="number"). + "max": 0.0, # + Optional. Maximum allowed value (for type="number"). + "min": 0.0, # + Optional. Minimum allowed value (for type="number"). + "name": "str", # + Optional. Setting key name (e.g. "max_tokens", + "temperature", "resolution"). + "options": [ + "str" # + Optional. 
Allowed values for dropdown selections + (for type="dropdown"). + ], + "step": 0.0, # + Optional. Step increment for numeric settings (for + type="number"). + "type": "str" # + Optional. Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. + Whether this model supports extended thinking (Anthropic + models). + "type": "str", # Optional. Model + type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. @@ -248496,6 +265358,8 @@ async def update_workspace( Optional. The agent template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking + token budget for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. Agents. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. @@ -248535,6 +265399,12 @@ async def update_workspace( Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value is + "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The size of @@ -248561,6 +265431,14 @@ async def update_workspace( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default value is + "EVALUATION_SCOPE_UNSPECIFIED". Scope that determines whether + a metric belongs to agent evaluation or model evaluation. For + backwards compatibility, UNSPECIFIED defaults to agent + metrics only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. 
If true, the metric is inverted, meaning that a lower value is better. @@ -248573,8 +265451,9 @@ async def update_workspace( "metric_type": "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: - "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", and - "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", and + "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -248932,6 +265811,12 @@ async def update_workspace( Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. + Whether reranking is enabled for retrieval. + "model": "str" # Optional. + Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -248959,6 +265844,24 @@ async def update_workspace( the log stream. }, "max_tokens": 0, # Optional. Agents. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional + list of allowed tool names to expose from this server. + ], + "authorization": "str", # Optional. + Optional authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. + Optional additional headers to send to the MCP server. + }, + "server_label": "str", # Optional. A + label identifying this MCP server. + "server_url": "str" # Optional. The + URL of the MCP server. + } + ], "model": { "agreement": { "description": "str", # Optional. @@ -248970,8 +265873,27 @@ async def update_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. 
Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -248985,16 +265907,64 @@ async def update_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. 
+ Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -249035,8 +266005,31 @@ async def update_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -249051,10 +266044,25 @@ async def update_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. 
"provider": @@ -249062,6 +266070,44 @@ async def update_workspace( is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -249092,6 +266138,48 @@ async def update_workspace( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. + Router-level fallback models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # Optional. Short task + description. + "name": "str" + # Optional. Task name. + }, + "models": [ + "str" # + Optional. Models assigned to the task. + ], + "selection_policy": { + "prefer": + "str" # Optional. One of: none, cheapest, + fastest. + }, + "task_slug": "str" # + Optional. Task slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # + Optional. 
Creation date / time. + "description": "str", # Optional. + Description. + "name": "str", # Optional. Name of the model + router. + "regions": [ + "str" # Optional. Target regions for + the router. + ], + "updated_at": "2020-02-20 00:00:00", # + Optional. Last modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # @@ -249112,8 +266200,31 @@ async def update_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -249128,10 +266239,25 @@ async def update_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. 
"provider": @@ -249139,6 +266265,44 @@ async def update_workspace( is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -249172,6 +266336,8 @@ async def update_workspace( "project_id": "str", # Optional. Agents. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning + effort for the agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * @@ -249330,6 +266496,12 @@ async def update_workspace( agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # + Optional. Whether reranking is enabled for retrieval. + "model": "str" # + Optional. 
Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -249357,8 +266529,27 @@ async def update_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. + Benchmark scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. + Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # + Optional. Capabilities supported by this endpoint + (e.g. input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # @@ -249372,10 +266563,25 @@ async def update_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -249383,6 +266589,42 @@ async def update_workspace( "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": + "str", # Optional. 
String default value (for + type="dropdown", e.g. "medium"). + "default_value": 0.0, + # Optional. Numeric default value (for + type="number"). + "max": 0.0, # + Optional. Maximum allowed value (for type="number"). + "min": 0.0, # + Optional. Minimum allowed value (for type="number"). + "name": "str", # + Optional. Setting key name (e.g. "max_tokens", + "temperature", "resolution"). + "options": [ + "str" # + Optional. Allowed values for dropdown selections + (for type="dropdown"). + ], + "step": 0.0, # + Optional. Step increment for numeric settings (for + type="number"). + "type": "str" # + Optional. Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. + Whether this model supports extended thinking (Anthropic + models). + "type": "str", # Optional. Model + type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. @@ -249429,6 +266671,8 @@ async def update_workspace( Optional. The agent template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking + token budget for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. Agents. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. @@ -249468,6 +266712,12 @@ async def update_workspace( Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value is + "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The size of @@ -249494,6 +266744,14 @@ async def update_workspace( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. 
+ "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default value is + "EVALUATION_SCOPE_UNSPECIFIED". Scope that determines whether + a metric belongs to agent evaluation or model evaluation. For + backwards compatibility, UNSPECIFIED defaults to agent + metrics only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -249506,8 +266764,9 @@ async def update_workspace( "metric_type": "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: - "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", and - "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", and + "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -250054,6 +267313,12 @@ async def list_agents_by_workspace( "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -250081,6 +267346,24 @@ async def list_agents_by_workspace( stream. }, "max_tokens": 0, # Optional. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of + allowed tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional + additional headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of + the MCP server. 
+ } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -250092,8 +267375,27 @@ async def list_agents_by_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -250106,14 +267408,59 @@ async def list_agents_by_workspace( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. 
+ String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -250151,8 +267498,27 @@ async def list_agents_by_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. + Benchmark scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. + Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # + Optional. Capabilities supported by this endpoint + (e.g. input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # @@ -250166,10 +267532,25 @@ async def list_agents_by_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. 
Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -250177,6 +267558,42 @@ async def list_agents_by_workspace( "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": + "str", # Optional. String default value (for + type="dropdown", e.g. "medium"). + "default_value": 0.0, + # Optional. Numeric default value (for + type="number"). + "max": 0.0, # + Optional. Maximum allowed value (for type="number"). + "min": 0.0, # + Optional. Minimum allowed value (for type="number"). + "name": "str", # + Optional. Setting key name (e.g. "max_tokens", + "temperature", "resolution"). + "options": [ + "str" # + Optional. Allowed values for dropdown selections + (for type="dropdown"). + ], + "step": 0.0, # + Optional. Step increment for numeric settings (for + type="number"). + "type": "str" # + Optional. Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. + Whether this model supports extended thinking (Anthropic + models). + "type": "str", # Optional. Model + type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. @@ -250207,6 +267624,45 @@ async def list_agents_by_workspace( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. 
}, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level + fallback models. + ], + "policies": [ + { + "custom_task": { + "description": "str", + # Optional. Short task description. + "name": "str" # + Optional. Task name. + }, + "models": [ + "str" # Optional. + Models assigned to the task. + ], + "selection_policy": { + "prefer": "str" # + Optional. One of: none, cheapest, fastest. + }, + "task_slug": "str" # + Optional. Task slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. + Creation date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the + router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. + Last modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -250227,8 +267683,27 @@ async def list_agents_by_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. + Benchmark scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. + Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # + Optional. Capabilities supported by this endpoint + (e.g. input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # @@ -250242,10 +267717,25 @@ async def list_agents_by_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. 
Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -250253,6 +267743,42 @@ async def list_agents_by_workspace( "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": + "str", # Optional. String default value (for + type="dropdown", e.g. "medium"). + "default_value": 0.0, + # Optional. Numeric default value (for + type="number"). + "max": 0.0, # + Optional. Maximum allowed value (for type="number"). + "min": 0.0, # + Optional. Minimum allowed value (for type="number"). + "name": "str", # + Optional. Setting key name (e.g. "max_tokens", + "temperature", "resolution"). + "options": [ + "str" # + Optional. Allowed values for dropdown selections + (for type="dropdown"). + ], + "step": 0.0, # + Optional. Step increment for numeric settings (for + type="number"). + "type": "str" # + Optional. Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. + Whether this model supports extended thinking (Anthropic + models). + "type": "str", # Optional. Model + type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. @@ -250286,6 +267812,8 @@ async def list_agents_by_workspace( "project_id": "str", # Optional. "provide_citations": bool, # Optional. 
Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort + for the agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: @@ -250433,6 +267961,12 @@ async def list_agents_by_workspace( List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. + Whether reranking is enabled for retrieval. + "model": "str" # Optional. + Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -250460,8 +267994,27 @@ async def list_agents_by_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -250475,16 +268028,64 @@ async def list_agents_by_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. 
+ Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -250528,6 +268129,8 @@ async def list_agents_by_workspace( agent template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token + budget for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. @@ -250570,6 +268173,13 @@ async def list_agents_by_workspace( 00:00:00", # Optional. 
Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default + value is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values + are: "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # @@ -250600,6 +268210,16 @@ async def list_agents_by_workspace( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent + evaluation or model evaluation. For backwards + compatibility, UNSPECIFIED defaults to agent metrics + only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", + "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -250613,8 +268233,10 @@ async def list_agents_by_workspace( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", - "METRIC_TYPE_GENERAL_QUALITY", and - "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_GENERAL_QUALITY", + "METRIC_TYPE_RAG_AND_TOOL", + "METRIC_TYPE_MODEL_QUALITY", and + "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -251081,6 +268703,12 @@ async def update_agents_workspace( Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. + Whether reranking is enabled for retrieval. + "model": "str" # Optional. + Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -251108,6 +268736,24 @@ async def update_agents_workspace( the log stream. 
}, "max_tokens": 0, # Optional. Agents. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional + list of allowed tool names to expose from this server. + ], + "authorization": "str", # Optional. + Optional authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. + Optional additional headers to send to the MCP server. + }, + "server_label": "str", # Optional. A + label identifying this MCP server. + "server_url": "str" # Optional. The + URL of the MCP server. + } + ], "model": { "agreement": { "description": "str", # Optional. @@ -251119,8 +268765,27 @@ async def update_agents_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -251134,16 +268799,64 @@ async def update_agents_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. 
+ Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -251184,8 +268897,31 @@ async def update_agents_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. 
Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -251200,10 +268936,25 @@ async def update_agents_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -251211,6 +268962,44 @@ async def update_agents_workspace( is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. 
Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -251241,6 +269030,48 @@ async def update_agents_workspace( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. + Router-level fallback models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # Optional. Short task + description. + "name": "str" + # Optional. Task name. + }, + "models": [ + "str" # + Optional. Models assigned to the task. + ], + "selection_policy": { + "prefer": + "str" # Optional. One of: none, cheapest, + fastest. + }, + "task_slug": "str" # + Optional. Task slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # + Optional. Creation date / time. + "description": "str", # Optional. + Description. + "name": "str", # Optional. Name of the model + router. + "regions": [ + "str" # Optional. Target regions for + the router. + ], + "updated_at": "2020-02-20 00:00:00", # + Optional. Last modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # @@ -251261,8 +269092,31 @@ async def update_agents_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. 
Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -251277,10 +269131,25 @@ async def update_agents_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -251288,6 +269157,44 @@ async def update_agents_workspace( is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. 
Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -251321,6 +269228,8 @@ async def update_agents_workspace( "project_id": "str", # Optional. Agents. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning + effort for the agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * @@ -251479,6 +269388,12 @@ async def update_agents_workspace( agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # + Optional. Whether reranking is enabled for retrieval. + "model": "str" # + Optional. Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -251506,8 +269421,27 @@ async def update_agents_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. + Benchmark scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. + Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # + Optional. Capabilities supported by this endpoint + (e.g. input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. 
"inference_version": "str", # @@ -251521,10 +269455,25 @@ async def update_agents_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -251532,6 +269481,42 @@ async def update_agents_workspace( "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": + "str", # Optional. String default value (for + type="dropdown", e.g. "medium"). + "default_value": 0.0, + # Optional. Numeric default value (for + type="number"). + "max": 0.0, # + Optional. Maximum allowed value (for type="number"). + "min": 0.0, # + Optional. Minimum allowed value (for type="number"). + "name": "str", # + Optional. Setting key name (e.g. "max_tokens", + "temperature", "resolution"). + "options": [ + "str" # + Optional. Allowed values for dropdown selections + (for type="dropdown"). + ], + "step": 0.0, # + Optional. Step increment for numeric settings (for + type="number"). + "type": "str" # + Optional. Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. + Whether this model supports extended thinking (Anthropic + models). + "type": "str", # Optional. Model + type (chat, embedding, image, reasoning, coding). 
"updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. @@ -251578,6 +269563,8 @@ async def update_agents_workspace( Optional. The agent template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking + token budget for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. Agents. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. @@ -251617,6 +269604,12 @@ async def update_agents_workspace( Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value is + "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The size of @@ -251643,6 +269636,14 @@ async def update_agents_workspace( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default value is + "EVALUATION_SCOPE_UNSPECIFIED". Scope that determines whether + a metric belongs to agent evaluation or model evaluation. For + backwards compatibility, UNSPECIFIED defaults to agent + metrics only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -251655,8 +269656,9 @@ async def update_agents_workspace( "metric_type": "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: - "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", and - "METRIC_TYPE_RAG_AND_TOOL". 
+ "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", and + "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -252012,6 +270014,12 @@ async def update_agents_workspace( Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. + Whether reranking is enabled for retrieval. + "model": "str" # Optional. + Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -252039,6 +270047,24 @@ async def update_agents_workspace( the log stream. }, "max_tokens": 0, # Optional. Agents. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional + list of allowed tool names to expose from this server. + ], + "authorization": "str", # Optional. + Optional authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. + Optional additional headers to send to the MCP server. + }, + "server_label": "str", # Optional. A + label identifying this MCP server. + "server_url": "str" # Optional. The + URL of the MCP server. + } + ], "model": { "agreement": { "description": "str", # Optional. @@ -252050,8 +270076,27 @@ async def update_agents_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. 
"inference_version": "str", # Optional. @@ -252065,16 +270110,64 @@ async def update_agents_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). 
"updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -252115,8 +270208,31 @@ async def update_agents_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -252131,10 +270247,25 @@ async def update_agents_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -252142,6 +270273,44 @@ async def update_agents_workspace( is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. 
String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -252172,6 +270341,48 @@ async def update_agents_workspace( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. + Router-level fallback models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # Optional. Short task + description. + "name": "str" + # Optional. Task name. + }, + "models": [ + "str" # + Optional. Models assigned to the task. + ], + "selection_policy": { + "prefer": + "str" # Optional. One of: none, cheapest, + fastest. + }, + "task_slug": "str" # + Optional. Task slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # + Optional. Creation date / time. + "description": "str", # Optional. + Description. + "name": "str", # Optional. Name of the model + router. + "regions": [ + "str" # Optional. Target regions for + the router. + ], + "updated_at": "2020-02-20 00:00:00", # + Optional. Last modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. 
"openai_api_key": { "created_at": "2020-02-20 00:00:00", # @@ -252192,8 +270403,31 @@ async def update_agents_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -252208,10 +270442,25 @@ async def update_agents_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -252219,6 +270468,44 @@ async def update_agents_workspace( is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. 
+ "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -252252,6 +270539,8 @@ async def update_agents_workspace( "project_id": "str", # Optional. Agents. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning + effort for the agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * @@ -252410,6 +270699,12 @@ async def update_agents_workspace( agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # + Optional. Whether reranking is enabled for retrieval. + "model": "str" # + Optional. Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -252437,8 +270732,27 @@ async def update_agents_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. + Benchmark scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). 
+ ], + "context_window": "str", # Optional. + Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # + Optional. Capabilities supported by this endpoint + (e.g. input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # @@ -252452,10 +270766,25 @@ async def update_agents_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -252463,6 +270792,42 @@ async def update_agents_workspace( "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": + "str", # Optional. String default value (for + type="dropdown", e.g. "medium"). + "default_value": 0.0, + # Optional. Numeric default value (for + type="number"). + "max": 0.0, # + Optional. Maximum allowed value (for type="number"). + "min": 0.0, # + Optional. Minimum allowed value (for type="number"). + "name": "str", # + Optional. Setting key name (e.g. "max_tokens", + "temperature", "resolution"). + "options": [ + "str" # + Optional. 
Allowed values for dropdown selections + (for type="dropdown"). + ], + "step": 0.0, # + Optional. Step increment for numeric settings (for + type="number"). + "type": "str" # + Optional. Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. + Whether this model supports extended thinking (Anthropic + models). + "type": "str", # Optional. Model + type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. @@ -252509,6 +270874,8 @@ async def update_agents_workspace( Optional. The agent template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking + token budget for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. Agents. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. @@ -252548,6 +270915,12 @@ async def update_agents_workspace( Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value is + "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The size of @@ -252574,6 +270947,14 @@ async def update_agents_workspace( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default value is + "EVALUATION_SCOPE_UNSPECIFIED". Scope that determines whether + a metric belongs to agent evaluation or model evaluation. For + backwards compatibility, UNSPECIFIED defaults to agent + metrics only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". 
"inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -252586,8 +270967,9 @@ async def update_agents_workspace( "metric_type": "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: - "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", and - "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", and + "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -252946,6 +271328,12 @@ async def update_agents_workspace( Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. + Whether reranking is enabled for retrieval. + "model": "str" # Optional. + Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -252973,6 +271361,24 @@ async def update_agents_workspace( the log stream. }, "max_tokens": 0, # Optional. Agents. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional + list of allowed tool names to expose from this server. + ], + "authorization": "str", # Optional. + Optional authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. + Optional additional headers to send to the MCP server. + }, + "server_label": "str", # Optional. A + label identifying this MCP server. + "server_url": "str" # Optional. The + URL of the MCP server. + } + ], "model": { "agreement": { "description": "str", # Optional. @@ -252984,8 +271390,27 @@ async def update_agents_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. 
Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -252999,16 +271424,64 @@ async def update_agents_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. 
+ Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -253049,8 +271522,31 @@ async def update_agents_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -253065,10 +271561,25 @@ async def update_agents_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. 
Unique id of the model, this model is based on. "provider": @@ -253076,6 +271587,44 @@ async def update_agents_workspace( is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -253106,6 +271655,48 @@ async def update_agents_workspace( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. + Router-level fallback models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # Optional. Short task + description. + "name": "str" + # Optional. Task name. + }, + "models": [ + "str" # + Optional. Models assigned to the task. + ], + "selection_policy": { + "prefer": + "str" # Optional. One of: none, cheapest, + fastest. + }, + "task_slug": "str" # + Optional. Task slug. 
+ } + ] + }, + "created_at": "2020-02-20 00:00:00", # + Optional. Creation date / time. + "description": "str", # Optional. + Description. + "name": "str", # Optional. Name of the model + router. + "regions": [ + "str" # Optional. Target regions for + the router. + ], + "updated_at": "2020-02-20 00:00:00", # + Optional. Last modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # @@ -253126,8 +271717,31 @@ async def update_agents_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -253142,10 +271756,25 @@ async def update_agents_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. 
"provider": @@ -253153,6 +271782,44 @@ async def update_agents_workspace( is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -253186,6 +271853,8 @@ async def update_agents_workspace( "project_id": "str", # Optional. Agents. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning + effort for the agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * @@ -253344,6 +272013,12 @@ async def update_agents_workspace( agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # + Optional. Whether reranking is enabled for retrieval. + "model": "str" # + Optional. 
Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -253371,8 +272046,27 @@ async def update_agents_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. + Benchmark scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. + Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # + Optional. Capabilities supported by this endpoint + (e.g. input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # @@ -253386,10 +272080,25 @@ async def update_agents_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -253397,6 +272106,42 @@ async def update_agents_workspace( "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": + "str", # Optional. 
String default value (for + type="dropdown", e.g. "medium"). + "default_value": 0.0, + # Optional. Numeric default value (for + type="number"). + "max": 0.0, # + Optional. Maximum allowed value (for type="number"). + "min": 0.0, # + Optional. Minimum allowed value (for type="number"). + "name": "str", # + Optional. Setting key name (e.g. "max_tokens", + "temperature", "resolution"). + "options": [ + "str" # + Optional. Allowed values for dropdown selections + (for type="dropdown"). + ], + "step": 0.0, # + Optional. Step increment for numeric settings (for + type="number"). + "type": "str" # + Optional. Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. + Whether this model supports extended thinking (Anthropic + models). + "type": "str", # Optional. Model + type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. @@ -253443,6 +272188,8 @@ async def update_agents_workspace( Optional. The agent template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking + token budget for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. Agents. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. @@ -253482,6 +272229,12 @@ async def update_agents_workspace( Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value is + "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The size of @@ -253508,6 +272261,14 @@ async def update_agents_workspace( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. 
+ "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default value is + "EVALUATION_SCOPE_UNSPECIFIED". Scope that determines whether + a metric belongs to agent evaluation or model evaluation. For + backwards compatibility, UNSPECIFIED defaults to agent + metrics only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -253520,8 +272281,9 @@ async def update_agents_workspace( "metric_type": "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: - "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", and - "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", and + "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -253703,6 +272465,11 @@ async def list_evaluation_test_cases_by_workspace( Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": "EVALUATION_DATASET_TYPE_UNKNOWN", # + Optional. Default value is "EVALUATION_DATASET_TYPE_UNKNOWN". Known + values are: "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The size of the @@ -253726,6 +272493,14 @@ async def list_evaluation_test_cases_by_workspace( "METRIC_CATEGORY_CONTEXT_QUALITY", and "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default value is + "EVALUATION_SCOPE_UNSPECIFIED". Scope that determines whether a + metric belongs to agent evaluation or model evaluation. 
For + backwards compatibility, UNSPECIFIED defaults to agent metrics + only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. "is_metric_goal": bool, # Optional. @@ -253734,7 +272509,8 @@ async def list_evaluation_test_cases_by_workspace( "metric_type": "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", - "METRIC_TYPE_GENERAL_QUALITY", and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_GENERAL_QUALITY", "METRIC_TYPE_RAG_AND_TOOL", + "METRIC_TYPE_MODEL_QUALITY", and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. "metric_value_type": "METRIC_VALUE_TYPE_UNSPECIFIED", # Optional. Default value is diff --git a/src/pydo/operations/_operations.py b/src/pydo/operations/_operations.py index b17b7036..6e0517f2 100644 --- a/src/pydo/operations/_operations.py +++ b/src/pydo/operations/_operations.py @@ -13385,6 +13385,105 @@ def build_genai_list_agents_by_anthropic_key_request( # pylint: disable=name-to ) +def build_genai_list_custom_models_request( + *, + page: Optional[int] = None, + per_page: Optional[int] = None, + status: str = "STATUS_UNSPECIFIED", + **kwargs: Any, +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/gen-ai/custom_models" + + # Construct parameters + if page is not None: + _params["page"] = _SERIALIZER.query("page", page, "int") + if per_page is not None: + _params["per_page"] = _SERIALIZER.query("per_page", per_page, "int") + if status is not None: + _params["status"] = _SERIALIZER.query("status", status, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + 
+ return HttpRequest( + method="GET", url=_url, params=_params, headers=_headers, **kwargs + ) + + +def build_genai_import_custom_model_request(**kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/gen-ai/custom_models/import" + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header( + "content_type", content_type, "str" + ) + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs) + + +def build_genai_delete_custom_model_request(uuid: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/gen-ai/custom_models/{uuid}" + path_format_arguments = { + "uuid": _SERIALIZER.url("uuid", uuid, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, headers=_headers, **kwargs) + + +def build_genai_update_custom_model_metadata_request( # pylint: disable=name-too-long + uuid: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/gen-ai/custom_models/{uuid}/metadata" + path_format_arguments = { + "uuid": _SERIALIZER.url("uuid", uuid, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + if content_type is not None: + 
_headers["Content-Type"] = _SERIALIZER.header( + "content_type", content_type, "str" + ) + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PATCH", url=_url, headers=_headers, **kwargs) + + def build_genai_create_evaluation_dataset_request( **kwargs: Any, ) -> HttpRequest: # pylint: disable=name-too-long @@ -13431,6 +13530,27 @@ def build_genai_create_evaluation_dataset_file_upload_presigned_urls_request( # return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs) +def build_genai_get_evaluation_dataset_download_url_request( # pylint: disable=name-too-long + dataset_uuid: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/gen-ai/evaluation_datasets/{dataset_uuid}/download_url" + path_format_arguments = { + "dataset_uuid": _SERIALIZER.url("dataset_uuid", dataset_uuid, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, headers=_headers, **kwargs) + + def build_genai_list_evaluation_metrics_request( **kwargs: Any, ) -> HttpRequest: # pylint: disable=name-too-long @@ -14114,6 +14234,159 @@ def build_genai_delete_knowledge_base_request( return HttpRequest(method="DELETE", url=_url, headers=_headers, **kwargs) +def build_genai_create_model_eval_dataset_upload_presigned_urls_request( # pylint: disable=name-too-long + **kwargs: Any, +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/gen-ai/model_evaluation/datasets/file_upload_presigned_urls" + + # Construct headers + if content_type 
is not None: + _headers["Content-Type"] = _SERIALIZER.header( + "content_type", content_type, "str" + ) + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs) + + +def build_genai_list_model_evaluation_metrics_request( + **kwargs: Any, +) -> HttpRequest: # pylint: disable=name-too-long + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/gen-ai/model_evaluation_metrics" + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, headers=_headers, **kwargs) + + +def build_genai_list_model_evaluation_runs_request( # pylint: disable=name-too-long + *, + eval_preset_uuid: Optional[str] = None, + status: str = "MODEL_EVALUATION_RUN_STATUS_UNSPECIFIED", + page: Optional[int] = None, + per_page: Optional[int] = None, + **kwargs: Any, +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/gen-ai/model_evaluation_runs" + + # Construct parameters + if eval_preset_uuid is not None: + _params["eval_preset_uuid"] = _SERIALIZER.query( + "eval_preset_uuid", eval_preset_uuid, "str" + ) + if status is not None: + _params["status"] = _SERIALIZER.query("status", status, "str") + if page is not None: + _params["page"] = _SERIALIZER.query("page", page, "int") + if per_page is not None: + _params["per_page"] = _SERIALIZER.query("per_page", per_page, "int") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest( + method="GET", url=_url, params=_params, headers=_headers, **kwargs + ) + + +def build_genai_create_model_evaluation_run_request( + **kwargs: Any, +) -> HttpRequest: # 
pylint: disable=name-too-long + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/gen-ai/model_evaluation_runs" + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header( + "content_type", content_type, "str" + ) + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs) + + +def build_genai_get_model_evaluation_run_request( # pylint: disable=name-too-long + eval_run_uuid: str, + *, + page: Optional[int] = None, + per_page: Optional[int] = None, + **kwargs: Any, +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/gen-ai/model_evaluation_runs/{eval_run_uuid}" + path_format_arguments = { + "eval_run_uuid": _SERIALIZER.url("eval_run_uuid", eval_run_uuid, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + if page is not None: + _params["page"] = _SERIALIZER.query("page", page, "int") + if per_page is not None: + _params["per_page"] = _SERIALIZER.query("per_page", per_page, "int") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest( + method="GET", url=_url, params=_params, headers=_headers, **kwargs + ) + + +def build_genai_get_model_evaluation_run_results_download_url_request( # pylint: disable=name-too-long + eval_run_uuid: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = 
"/v2/gen-ai/model_evaluation_runs/{eval_run_uuid}/results/download_url" + path_format_arguments = { + "eval_run_uuid": _SERIALIZER.url("eval_run_uuid", eval_run_uuid, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, headers=_headers, **kwargs) + + def build_genai_list_models_request( *, usecases: Optional[List[str]] = None, @@ -215138,6 +215411,24 @@ def list_agents( "max_tokens": 0, # Optional. Specifies the maximum number of tokens the model can process in a single input or output, set as a number between 1 and 512. This determines the length of each response. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of + allowed tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional + additional headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of + the MCP server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -215149,8 +215440,27 @@ def list_agents( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). 
+ } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -215163,14 +215473,59 @@ def list_agents( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. 
Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -215188,11 +215543,52 @@ def list_agents( "patch": 0 # Optional. Patch version number. } }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level + fallback models. + ], + "policies": [ + { + "custom_task": { + "description": "str", + # Optional. Short task description. + "name": "str" # + Optional. Task name. + }, + "models": [ + "str" # Optional. + Models assigned to the task. + ], + "selection_policy": { + "prefer": "str" # + Optional. One of: none, cheapest, fastest. + }, + "task_slug": "str" # + Optional. Task slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. + Creation date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the + router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. + Last modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "project_id": "str", # Optional. The DigitalOcean project ID associated with the agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort + for the agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: @@ -215344,6 +215740,12 @@ def list_agents( List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. + Whether reranking is enabled for retrieval. + "model": "str" # Optional. + Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. 
@@ -215371,8 +215773,27 @@ def list_agents( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -215386,16 +215807,64 @@ def list_agents( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). 
+ "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -215439,6 +215908,8 @@ def list_agents( agent template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token + budget for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. Defines the cumulative probability threshold for word selection, specified as a number between 0 and 1. Higher values allow for more diverse outputs, while lower values ensure @@ -215599,18 +216070,39 @@ def create_agent( "str" # Optional. Ids of the knowledge base(s) to attach to the agent. ], + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed tool + names to expose from this server. + ], + "authorization": "str", # Optional. Optional authorization + header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional headers + to send to the MCP server. + }, + "server_label": "str", # Optional. A label identifying this + MCP server. + "server_url": "str" # Optional. The URL of the MCP server. + } + ], "model_provider_key_uuid": "str", # Optional. + "model_router_uuid": "str", # Optional. "model_uuid": "str", # Optional. 
Identifier for the foundation model. "name": "str", # Optional. Agent name. "open_ai_key_uuid": "str", # Optional. Optional OpenAI API key ID to use with OpenAI models. "project_id": "str", # Optional. The id of the DigitalOcean project this agent will belong to. + "reasoning_effort": "str", # Optional. "region": "str", # Optional. The DigitalOcean region to deploy your agent in. + "router_preset_slug": "str", # Optional. "tags": [ "str" # Optional. Agent tag to organize related resources. ], + "thinking_token_budget": 0, # Optional. "workspace_uuid": "str" # Optional. Identifier for the workspace. } @@ -215851,6 +216343,12 @@ def create_agent( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -215877,6 +216375,24 @@ def create_agent( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -215885,8 +216401,27 @@ def create_agent( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. 
+ "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -215898,14 +216433,56 @@ def create_agent( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. 
Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -215940,8 +216517,27 @@ def create_agent( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -215955,16 +216551,64 @@ def create_agent( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. 
+ "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -215993,6 +216637,44 @@ def create_agent( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. 
+ One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -216012,8 +216694,27 @@ def create_agent( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -216027,16 +216728,64 @@ def create_agent( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. 
"parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -216068,6 +216817,8 @@ def create_agent( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -216212,6 +216963,12 @@ def create_agent( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. 
"region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -216239,8 +216996,27 @@ def create_agent( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -216253,14 +217029,59 @@ def create_agent( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". 
+ "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -216300,6 +217121,8 @@ def create_agent( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -216339,6 +217162,13 @@ def create_agent( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. 
"file_size": "str", # Optional. The @@ -216369,6 +217199,15 @@ def create_agent( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -216382,7 +217221,8 @@ def create_agent( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -216706,6 +217546,12 @@ def create_agent( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -216732,6 +217578,24 @@ def create_agent( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. 
+ }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -216740,8 +217604,27 @@ def create_agent( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -216753,14 +217636,56 @@ def create_agent( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". 
+ "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -216795,8 +217720,27 @@ def create_agent( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -216810,16 +217754,64 @@ def create_agent( "kb_min_chunk_size": 0, # Optional. 
Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. 
Model @@ -216848,6 +217840,44 @@ def create_agent( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -216867,8 +217897,27 @@ def create_agent( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -216882,16 +217931,64 @@ def create_agent( "kb_min_chunk_size": 0, # Optional. 
Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. 
Model @@ -216923,6 +218020,8 @@ def create_agent( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -217067,6 +218166,12 @@ def create_agent( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -217094,8 +218199,27 @@ def create_agent( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -217108,14 +218232,59 @@ def create_agent( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. 
Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -217155,6 +218324,8 @@ def create_agent( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. 
"updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -217194,6 +218365,13 @@ def create_agent( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -217224,6 +218402,15 @@ def create_agent( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -217237,7 +218424,8 @@ def create_agent( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -217331,18 +218519,39 @@ def create_agent( "str" # Optional. Ids of the knowledge base(s) to attach to the agent. ], + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed tool + names to expose from this server. + ], + "authorization": "str", # Optional. 
Optional authorization + header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional headers + to send to the MCP server. + }, + "server_label": "str", # Optional. A label identifying this + MCP server. + "server_url": "str" # Optional. The URL of the MCP server. + } + ], "model_provider_key_uuid": "str", # Optional. + "model_router_uuid": "str", # Optional. "model_uuid": "str", # Optional. Identifier for the foundation model. "name": "str", # Optional. Agent name. "open_ai_key_uuid": "str", # Optional. Optional OpenAI API key ID to use with OpenAI models. "project_id": "str", # Optional. The id of the DigitalOcean project this agent will belong to. + "reasoning_effort": "str", # Optional. "region": "str", # Optional. The DigitalOcean region to deploy your agent in. + "router_preset_slug": "str", # Optional. "tags": [ "str" # Optional. Agent tag to organize related resources. ], + "thinking_token_budget": 0, # Optional. "workspace_uuid": "str" # Optional. Identifier for the workspace. } @@ -217583,6 +218792,12 @@ def create_agent( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -217609,6 +218824,24 @@ def create_agent( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. 
A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -217617,8 +218850,27 @@ def create_agent( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -217630,14 +218882,56 @@ def create_agent( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. 
Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -217672,8 +218966,27 @@ def create_agent( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -217687,16 +219000,64 @@ def create_agent( "kb_min_chunk_size": 0, # Optional. 
Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. 
Model @@ -217725,6 +219086,44 @@ def create_agent( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -217744,8 +219143,27 @@ def create_agent( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -217759,16 +219177,64 @@ def create_agent( "kb_min_chunk_size": 0, # Optional. 
Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. 
Model @@ -217800,6 +219266,8 @@ def create_agent( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -217944,6 +219412,12 @@ def create_agent( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -217971,8 +219445,27 @@ def create_agent( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -217985,14 +219478,59 @@ def create_agent( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. 
Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -218032,6 +219570,8 @@ def create_agent( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. 
"updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -218071,6 +219611,13 @@ def create_agent( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -218101,6 +219648,15 @@ def create_agent( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -218114,7 +219670,8 @@ def create_agent( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -219471,6 +221028,12 @@ def attach_agent_function( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. 
Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -219497,6 +221060,24 @@ def attach_agent_function( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -219505,8 +221086,27 @@ def attach_agent_function( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -219518,14 +221118,56 @@ def attach_agent_function( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). 
"metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -219560,8 +221202,27 @@ def attach_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. 
tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -219575,16 +221236,64 @@ def attach_agent_function( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). 
+ "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -219613,6 +221322,44 @@ def attach_agent_function( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -219632,8 +221379,27 @@ def attach_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). 
+ ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -219647,16 +221413,64 @@ def attach_agent_function( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. 
+ Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -219688,6 +221502,8 @@ def attach_agent_function( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -219832,6 +221648,12 @@ def attach_agent_function( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -219859,8 +221681,27 @@ def attach_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. 
input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -219873,14 +221714,59 @@ def attach_agent_function( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". 
+ } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -219920,6 +221806,8 @@ def attach_agent_function( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -219959,6 +221847,13 @@ def attach_agent_function( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -219989,6 +221884,15 @@ def attach_agent_function( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -220002,7 +221906,8 @@ def attach_agent_function( "METRIC_TYPE_UNSPECIFIED", # Optional. 
Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -220329,6 +222234,12 @@ def attach_agent_function( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -220355,6 +222266,24 @@ def attach_agent_function( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -220363,8 +222292,27 @@ def attach_agent_function( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. 
+ "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -220376,14 +222324,56 @@ def attach_agent_function( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. 
Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -220418,8 +222408,27 @@ def attach_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -220433,16 +222442,64 @@ def attach_agent_function( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. 
"provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -220471,6 +222528,44 @@ def attach_agent_function( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. 
Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -220490,8 +222585,27 @@ def attach_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -220505,16 +222619,64 @@ def attach_agent_function( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. 
Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -220546,6 +222708,8 @@ def attach_agent_function( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -220690,6 +222854,12 @@ def attach_agent_function( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. 
Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -220717,8 +222887,27 @@ def attach_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -220731,14 +222920,59 @@ def attach_agent_function( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. 
Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -220778,6 +223012,8 @@ def attach_agent_function( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -220817,6 +223053,13 @@ def attach_agent_function( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. 
The @@ -220847,6 +223090,15 @@ def attach_agent_function( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -220860,7 +223112,8 @@ def attach_agent_function( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -221197,6 +223450,12 @@ def attach_agent_function( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -221223,6 +223482,24 @@ def attach_agent_function( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. 
+ }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -221231,8 +223508,27 @@ def attach_agent_function( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -221244,14 +223540,56 @@ def attach_agent_function( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". 
Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -221286,8 +223624,27 @@ def attach_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. 
"inference_version": "str", # Optional. @@ -221301,16 +223658,64 @@ def attach_agent_function( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). 
"updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -221339,6 +223744,44 @@ def attach_agent_function( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -221358,8 +223801,27 @@ def attach_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. 
@@ -221373,16 +223835,64 @@ def attach_agent_function( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. 
"upload_complete": bool, # Optional. Model @@ -221414,6 +223924,8 @@ def attach_agent_function( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -221558,6 +224070,12 @@ def attach_agent_function( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -221585,8 +224103,27 @@ def attach_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -221599,14 +224136,59 @@ def attach_agent_function( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. 
Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -221646,6 +224228,8 @@ def attach_agent_function( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. 
The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -221685,6 +224269,13 @@ def attach_agent_function( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -221715,6 +224306,15 @@ def attach_agent_function( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -221728,7 +224328,8 @@ def attach_agent_function( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -222168,6 +224769,12 @@ def update_agent_function( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. 
Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -222194,6 +224801,24 @@ def update_agent_function( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -222202,8 +224827,27 @@ def update_agent_function( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -222215,14 +224859,56 @@ def update_agent_function( of model. "kb_min_chunk_size": 0, # Optional. 
Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. 
Model has been fully @@ -222257,8 +224943,27 @@ def update_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -222272,16 +224977,64 @@ def update_agent_function( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. 
Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -222310,6 +225063,44 @@ def update_agent_function( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. 
Key @@ -222329,8 +225120,27 @@ def update_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -222344,16 +225154,64 @@ def update_agent_function( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. 
Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -222385,6 +225243,8 @@ def update_agent_function( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -222529,6 +225389,12 @@ def update_agent_function( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -222556,8 +225422,27 @@ def update_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. 
tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -222570,14 +225455,59 @@ def update_agent_function( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. 
"max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -222617,6 +225547,8 @@ def update_agent_function( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -222656,6 +225588,13 @@ def update_agent_function( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -222686,6 +225625,15 @@ def update_agent_function( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. 
Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -222699,7 +225647,8 @@ def update_agent_function( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -223029,6 +225978,12 @@ def update_agent_function( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -223055,6 +226010,24 @@ def update_agent_function( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -223063,8 +226036,27 @@ def update_agent_function( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. 
Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -223076,14 +226068,56 @@ def update_agent_function( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. 
Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -223118,8 +226152,27 @@ def update_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -223133,16 +226186,64 @@ def update_agent_function( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. 
+ Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -223171,6 +226272,44 @@ def update_agent_function( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. 
Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -223190,8 +226329,27 @@ def update_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -223205,16 +226363,64 @@ def update_agent_function( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. 
Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -223246,6 +226452,8 @@ def update_agent_function( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". 
* RETRIEVAL_METHOD_UNKNOWN: The @@ -223390,6 +226598,12 @@ def update_agent_function( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -223417,8 +226631,27 @@ def update_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -223431,14 +226664,59 @@ def update_agent_function( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. 
"provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -223478,6 +226756,8 @@ def update_agent_function( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -223517,6 +226797,13 @@ def update_agent_function( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". 
Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -223547,6 +226834,15 @@ def update_agent_function( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -223560,7 +226856,8 @@ def update_agent_function( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -223901,6 +227198,12 @@ def update_agent_function( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -223927,6 +227230,24 @@ def update_agent_function( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. 
Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -223935,8 +227256,27 @@ def update_agent_function( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -223948,14 +227288,56 @@ def update_agent_function( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. 
"parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -223990,8 +227372,27 @@ def update_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. 
+ input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -224005,16 +227406,64 @@ def update_agent_function( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. 
Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -224043,6 +227492,44 @@ def update_agent_function( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -224062,8 +227549,27 @@ def update_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. 
The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -224077,16 +227583,64 @@ def update_agent_function( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). 
+ "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -224118,6 +227672,8 @@ def update_agent_function( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -224262,6 +227818,12 @@ def update_agent_function( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -224289,8 +227851,27 @@ def update_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -224303,14 +227884,59 @@ def update_agent_function( size limit of model. "kb_min_chunk_size": 0, # Optional. 
Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. 
Model has been @@ -224350,6 +227976,8 @@ def update_agent_function( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -224389,6 +228017,13 @@ def update_agent_function( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -224419,6 +228054,15 @@ def update_agent_function( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -224432,7 +228076,8 @@ def update_agent_function( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. 
"metric_value_type": @@ -224847,6 +228492,12 @@ def detach_agent_function( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -224873,6 +228524,24 @@ def detach_agent_function( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -224881,8 +228550,27 @@ def detach_agent_function( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. 
"inference_version": "str", # Optional. Internally used version. @@ -224894,14 +228582,56 @@ def detach_agent_function( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). 
"updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -224936,8 +228666,27 @@ def detach_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -224951,16 +228700,64 @@ def detach_agent_function( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. 
String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -224989,6 +228786,44 @@ def detach_agent_function( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. 
"openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -225008,8 +228843,27 @@ def detach_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -225023,16 +228877,64 @@ def detach_agent_function( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). 
+ "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -225064,6 +228966,8 @@ def detach_agent_function( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -225208,6 +229112,12 @@ def detach_agent_function( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -225235,8 +229145,27 @@ def detach_agent_function( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. 
+ "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -225249,14 +229178,59 @@ def detach_agent_function( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). 
+ "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -225296,6 +229270,8 @@ def detach_agent_function( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -225335,6 +229311,13 @@ def detach_agent_function( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -225365,6 +229348,15 @@ def detach_agent_function( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. 
For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -225378,7 +229370,8 @@ def detach_agent_function( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -225794,6 +229787,12 @@ def attach_agent_guardrails( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -225820,6 +229819,24 @@ def attach_agent_guardrails( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -225828,8 +229845,27 @@ def attach_agent_guardrails( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. 
Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -225841,14 +229877,56 @@ def attach_agent_guardrails( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. 
Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -225883,8 +229961,27 @@ def attach_agent_guardrails( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -225898,16 +229995,64 @@ def attach_agent_guardrails( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. 
+ "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -225936,6 +230081,44 @@ def attach_agent_guardrails( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. 
Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -225955,8 +230138,27 @@ def attach_agent_guardrails( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -225970,16 +230172,64 @@ def attach_agent_guardrails( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. 
+ ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -226011,6 +230261,8 @@ def attach_agent_guardrails( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. 
Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -226155,6 +230407,12 @@ def attach_agent_guardrails( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -226182,8 +230440,27 @@ def attach_agent_guardrails( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -226196,14 +230473,59 @@ def attach_agent_guardrails( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. 
Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -226243,6 +230565,8 @@ def attach_agent_guardrails( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -226282,6 +230606,13 @@ def attach_agent_guardrails( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. 
Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -226312,6 +230643,15 @@ def attach_agent_guardrails( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -226325,7 +230665,8 @@ def attach_agent_guardrails( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -226652,6 +230993,12 @@ def attach_agent_guardrails( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -226678,6 +231025,24 @@ def attach_agent_guardrails( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. 
+ "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -226686,8 +231051,27 @@ def attach_agent_guardrails( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -226699,14 +231083,56 @@ def attach_agent_guardrails( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. 
+ "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -226741,8 +231167,27 @@ def attach_agent_guardrails( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. 
+ Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -226756,16 +231201,64 @@ def attach_agent_guardrails( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". 
+ } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -226794,6 +231287,44 @@ def attach_agent_guardrails( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -226813,8 +231344,27 @@ def attach_agent_guardrails( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. 
+ input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -226828,16 +231378,64 @@ def attach_agent_guardrails( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. 
Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -226869,6 +231467,8 @@ def attach_agent_guardrails( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -227013,6 +231613,12 @@ def attach_agent_guardrails( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -227040,8 +231646,27 @@ def attach_agent_guardrails( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. 
Internally @@ -227054,14 +231679,59 @@ def attach_agent_guardrails( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. 
Last modified. "upload_complete": bool, # Optional. Model has been @@ -227101,6 +231771,8 @@ def attach_agent_guardrails( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -227140,6 +231812,13 @@ def attach_agent_guardrails( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -227170,6 +231849,15 @@ def attach_agent_guardrails( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -227183,7 +231871,8 @@ def attach_agent_guardrails( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". 
"metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -227516,6 +232205,12 @@ def attach_agent_guardrails( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -227542,6 +232237,24 @@ def attach_agent_guardrails( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -227550,8 +232263,27 @@ def attach_agent_guardrails( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. 
Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -227563,14 +232295,56 @@ def attach_agent_guardrails( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. 
Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -227605,8 +232379,27 @@ def attach_agent_guardrails( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -227620,16 +232413,64 @@ def attach_agent_guardrails( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. 
+ ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -227658,6 +232499,44 @@ def attach_agent_guardrails( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. 
+ }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -227677,8 +232556,27 @@ def attach_agent_guardrails( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -227692,16 +232590,64 @@ def attach_agent_guardrails( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. 
String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -227733,6 +232679,8 @@ def attach_agent_guardrails( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -227877,6 +232825,12 @@ def attach_agent_guardrails( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -227904,8 +232858,27 @@ def attach_agent_guardrails( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. 
Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -227918,14 +232891,59 @@ def attach_agent_guardrails( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. 
Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -227965,6 +232983,8 @@ def attach_agent_guardrails( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -228004,6 +233024,13 @@ def attach_agent_guardrails( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -228034,6 +233061,15 @@ def attach_agent_guardrails( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. 
For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -228047,7 +233083,8 @@ def attach_agent_guardrails( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -228460,6 +233497,12 @@ def detach_agent_guardrail( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -228486,6 +233529,24 @@ def detach_agent_guardrail( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -228494,8 +233555,27 @@ def detach_agent_guardrail( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. 
Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -228507,14 +233587,56 @@ def detach_agent_guardrail( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. 
Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -228549,8 +233671,27 @@ def detach_agent_guardrail( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -228564,16 +233705,64 @@ def detach_agent_guardrail( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. 
+ Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -228602,6 +233791,44 @@ def detach_agent_guardrail( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. 
+ Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -228621,8 +233848,27 @@ def detach_agent_guardrail( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -228636,16 +233882,64 @@ def detach_agent_guardrail( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. 
+ Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -228677,6 +233971,8 @@ def detach_agent_guardrail( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". 
* RETRIEVAL_METHOD_UNKNOWN: The @@ -228821,6 +234117,12 @@ def detach_agent_guardrail( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -228848,8 +234150,27 @@ def detach_agent_guardrail( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -228862,14 +234183,59 @@ def detach_agent_guardrail( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. 
"provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -228909,6 +234275,8 @@ def detach_agent_guardrail( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -228948,6 +234316,13 @@ def detach_agent_guardrail( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". 
Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -228978,6 +234353,15 @@ def detach_agent_guardrail( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -228991,7 +234375,8 @@ def detach_agent_guardrail( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -229384,6 +234769,12 @@ def attach_knowledge_bases(self, agent_uuid: str, **kwargs: Any) -> JSON: "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -229410,6 +234801,24 @@ def attach_knowledge_bases(self, agent_uuid: str, **kwargs: Any) -> JSON: "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. 
+ "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -229418,8 +234827,27 @@ def attach_knowledge_bases(self, agent_uuid: str, **kwargs: Any) -> JSON: "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -229431,14 +234859,56 @@ def attach_knowledge_bases(self, agent_uuid: str, **kwargs: Any) -> JSON: of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. 
Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -229473,8 +234943,27 @@ def attach_knowledge_bases(self, agent_uuid: str, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. 
"created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -229488,16 +234977,64 @@ def attach_knowledge_bases(self, agent_uuid: str, **kwargs: Any) -> JSON: "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. 
+ Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -229526,6 +235063,44 @@ def attach_knowledge_bases(self, agent_uuid: str, **kwargs: Any) -> JSON: "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -229545,8 +235120,27 @@ def attach_knowledge_bases(self, agent_uuid: str, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. 
tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -229560,16 +235154,64 @@ def attach_knowledge_bases(self, agent_uuid: str, **kwargs: Any) -> JSON: "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. 
"max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -229601,6 +235243,8 @@ def attach_knowledge_bases(self, agent_uuid: str, **kwargs: Any) -> JSON: "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -229745,6 +235389,12 @@ def attach_knowledge_bases(self, agent_uuid: str, **kwargs: Any) -> JSON: "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -229772,8 +235422,27 @@ def attach_knowledge_bases(self, agent_uuid: str, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. 
Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -229786,14 +235455,59 @@ def attach_knowledge_bases(self, agent_uuid: str, **kwargs: Any) -> JSON: size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). 
+ "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -229833,6 +235547,8 @@ def attach_knowledge_bases(self, agent_uuid: str, **kwargs: Any) -> JSON: template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -229872,6 +235588,13 @@ def attach_knowledge_bases(self, agent_uuid: str, **kwargs: Any) -> JSON: # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -229902,6 +235625,15 @@ def attach_knowledge_bases(self, agent_uuid: str, **kwargs: Any) -> JSON: "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. 
For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -229915,7 +235647,8 @@ def attach_knowledge_bases(self, agent_uuid: str, **kwargs: Any) -> JSON: "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -230311,6 +236044,12 @@ def attach_knowledge_base( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -230337,6 +236076,24 @@ def attach_knowledge_base( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -230345,8 +236102,27 @@ def attach_knowledge_base( "url": "str", # Optional. 
Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -230358,14 +236134,56 @@ def attach_knowledge_base( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. 
Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -230400,8 +236218,27 @@ def attach_knowledge_base( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -230415,16 +236252,64 @@ def attach_knowledge_base( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. 
Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -230453,6 +236338,44 @@ def attach_knowledge_base( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. 
+ ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -230472,8 +236395,27 @@ def attach_knowledge_base( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -230487,16 +236429,64 @@ def attach_knowledge_base( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. 
+ "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -230528,6 +236518,8 @@ def attach_knowledge_base( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. 
"retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -230672,6 +236664,12 @@ def attach_knowledge_base( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -230699,8 +236697,27 @@ def attach_knowledge_base( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -230713,14 +236730,59 @@ def attach_knowledge_base( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. 
Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -230760,6 +236822,8 @@ def attach_knowledge_base( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -230799,6 +236863,13 @@ def attach_knowledge_base( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. 
+ "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -230829,6 +236900,15 @@ def attach_knowledge_base( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -230842,7 +236922,8 @@ def attach_knowledge_base( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -231239,6 +237320,12 @@ def detach_knowledge_base( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -231265,6 +237352,24 @@ def detach_knowledge_base( "log_stream_name": "str" # Optional. Name of the log stream. 
}, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -231273,8 +237378,27 @@ def detach_knowledge_base( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -231286,14 +237410,56 @@ def detach_knowledge_base( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. 
+ ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -231328,8 +237494,27 @@ def detach_knowledge_base( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. 
+ "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -231343,16 +237528,64 @@ def detach_knowledge_base( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). 
+ "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -231381,6 +237614,44 @@ def detach_knowledge_base( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -231400,8 +237671,27 @@ def detach_knowledge_base( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. 
+ Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -231415,16 +237705,64 @@ def detach_knowledge_base( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". 
+ } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -231456,6 +237794,8 @@ def detach_knowledge_base( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -231600,6 +237940,12 @@ def detach_knowledge_base( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -231627,8 +237973,27 @@ def detach_knowledge_base( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. 
Internally @@ -231641,14 +238006,59 @@ def detach_knowledge_base( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. 
Last modified. "upload_complete": bool, # Optional. Model has been @@ -231688,6 +238098,8 @@ def detach_knowledge_base( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -231727,6 +238139,13 @@ def detach_knowledge_base( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -231757,6 +238176,15 @@ def detach_knowledge_base( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -231770,7 +238198,8 @@ def detach_knowledge_base( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". 
"metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -232812,6 +239241,12 @@ def get_agent(self, uuid: str, **kwargs: Any) -> JSON: "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -232838,6 +239273,24 @@ def get_agent(self, uuid: str, **kwargs: Any) -> JSON: "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -232846,8 +239299,27 @@ def get_agent(self, uuid: str, **kwargs: Any) -> JSON: "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. 
The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -232859,14 +239331,56 @@ def get_agent(self, uuid: str, **kwargs: Any) -> JSON: of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. 
Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -232901,8 +239415,27 @@ def get_agent(self, uuid: str, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -232916,16 +239449,64 @@ def get_agent(self, uuid: str, **kwargs: Any) -> JSON: "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". 
Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -232954,6 +239535,44 @@ def get_agent(self, uuid: str, **kwargs: Any) -> JSON: "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. 
+ "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -232973,8 +239592,27 @@ def get_agent(self, uuid: str, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -232988,16 +239626,64 @@ def get_agent(self, uuid: str, **kwargs: Any) -> JSON: "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. 
Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -233029,6 +239715,8 @@ def get_agent(self, uuid: str, **kwargs: Any) -> JSON: "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -233173,6 +239861,12 @@ def get_agent(self, uuid: str, **kwargs: Any) -> JSON: "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. 
+ "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -233200,8 +239894,27 @@ def get_agent(self, uuid: str, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -233214,14 +239927,59 @@ def get_agent(self, uuid: str, **kwargs: Any) -> JSON: size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". 
Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -233261,6 +240019,8 @@ def get_agent(self, uuid: str, **kwargs: Any) -> JSON: template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -233300,6 +240060,13 @@ def get_agent(self, uuid: str, **kwargs: Any) -> JSON: # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". 
Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -233330,6 +240097,15 @@ def get_agent(self, uuid: str, **kwargs: Any) -> JSON: "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -233343,7 +240119,8 @@ def get_agent(self, uuid: str, **kwargs: Any) -> JSON: "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -233519,6 +240296,8 @@ def update_agent( ], "anthropic_key_uuid": "str", # Optional. Optional anthropic key uuid for use with anthropic models. + "clear_mcp_servers": bool, # Optional. When true, removes all MCP servers + from the agent. Use this instead of sending an empty mcp_servers array. "conversation_logs_enabled": bool, # Optional. Optional update of conversation logs enabled. "description": "str", # Optional. Agent description. @@ -233531,8 +240310,26 @@ def update_agent( "max_tokens": 0, # Optional. 
Specifies the maximum number of tokens the model can process in a single input or output, set as a number between 1 and 512. This determines the length of each response. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed tool + names to expose from this server. + ], + "authorization": "str", # Optional. Optional authorization + header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional headers + to send to the MCP server. + }, + "server_label": "str", # Optional. A label identifying this + MCP server. + "server_url": "str" # Optional. The URL of the MCP server. + } + ], "model_provider_key_uuid": "str", # Optional. Optional Model Provider uuid for use with provider models. + "model_router_uuid": "str", # Optional. "model_uuid": "str", # Optional. Identifier for the foundation model. "name": "str", # Optional. Agent name. "open_ai_key_uuid": "str", # Optional. Optional OpenAI key uuid for use with @@ -233540,6 +240337,7 @@ def update_agent( "project_id": "str", # Optional. The id of the DigitalOcean project this agent will belong to. "provide_citations": bool, # Optional. + "reasoning_effort": "str", # Optional. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The retrieval method is unknown * RETRIEVAL_METHOD_REWRITE: The retrieval method is rewrite * @@ -233549,12 +240347,14 @@ def update_agent( "RETRIEVAL_METHOD_UNKNOWN", "RETRIEVAL_METHOD_REWRITE", "RETRIEVAL_METHOD_STEP_BACK", "RETRIEVAL_METHOD_SUB_QUERIES", and "RETRIEVAL_METHOD_NONE". + "router_preset_slug": "str", # Optional. "tags": [ "str" # Optional. A set of abitrary tags to organize your agent. ], "temperature": 0.0, # Optional. Controls the model"u2019s creativity, specified as a number between 0 and 1. Lower values produce more predictable and conservative responses, while higher values encourage creativity and variation. 
+ "thinking_token_budget": 0, # Optional. "top_p": 0.0, # Optional. Defines the cumulative probability threshold for word selection, specified as a number between 0 and 1. Higher values allow for more diverse outputs, while lower values ensure focused and coherent responses. @@ -233798,6 +240598,12 @@ def update_agent( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -233824,6 +240630,24 @@ def update_agent( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -233832,8 +240656,27 @@ def update_agent( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. 
Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -233845,14 +240688,56 @@ def update_agent( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. 
Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -233887,8 +240772,27 @@ def update_agent( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -233902,16 +240806,64 @@ def update_agent( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". 
Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -233940,6 +240892,44 @@ def update_agent( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. 
+ "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -233959,8 +240949,27 @@ def update_agent( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -233974,16 +240983,64 @@ def update_agent( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". 
+ "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -234015,6 +241072,8 @@ def update_agent( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -234159,6 +241218,12 @@ def update_agent( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. 
@@ -234186,8 +241251,27 @@ def update_agent( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -234200,14 +241284,59 @@ def update_agent( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. 
+ Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -234247,6 +241376,8 @@ def update_agent( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -234286,6 +241417,13 @@ def update_agent( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -234316,6 +241454,15 @@ def update_agent( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". 
Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -234329,7 +241476,8 @@ def update_agent( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -234656,6 +241804,12 @@ def update_agent( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -234682,6 +241836,24 @@ def update_agent( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -234690,8 +241862,27 @@ def update_agent( "url": "str", # Optional. 
Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -234703,14 +241894,56 @@ def update_agent( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). 
+ "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -234745,8 +241978,27 @@ def update_agent( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -234760,16 +242012,64 @@ def update_agent( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. 
+ "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -234798,6 +242098,44 @@ def update_agent( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. 
Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -234817,8 +242155,27 @@ def update_agent( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -234832,16 +242189,64 @@ def update_agent( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. 
+ ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -234873,6 +242278,8 @@ def update_agent( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". 
* RETRIEVAL_METHOD_UNKNOWN: The @@ -235017,6 +242424,12 @@ def update_agent( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -235044,8 +242457,27 @@ def update_agent( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -235058,14 +242490,59 @@ def update_agent( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. 
"provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -235105,6 +242582,8 @@ def update_agent( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -235144,6 +242623,13 @@ def update_agent( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". 
Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -235174,6 +242660,15 @@ def update_agent( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -235187,7 +242682,8 @@ def update_agent( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -235278,6 +242774,8 @@ def update_agent( ], "anthropic_key_uuid": "str", # Optional. Optional anthropic key uuid for use with anthropic models. + "clear_mcp_servers": bool, # Optional. When true, removes all MCP servers + from the agent. Use this instead of sending an empty mcp_servers array. "conversation_logs_enabled": bool, # Optional. Optional update of conversation logs enabled. "description": "str", # Optional. Agent description. @@ -235290,8 +242788,26 @@ def update_agent( "max_tokens": 0, # Optional. Specifies the maximum number of tokens the model can process in a single input or output, set as a number between 1 and 512. This determines the length of each response. 
+ "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed tool + names to expose from this server. + ], + "authorization": "str", # Optional. Optional authorization + header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional headers + to send to the MCP server. + }, + "server_label": "str", # Optional. A label identifying this + MCP server. + "server_url": "str" # Optional. The URL of the MCP server. + } + ], "model_provider_key_uuid": "str", # Optional. Optional Model Provider uuid for use with provider models. + "model_router_uuid": "str", # Optional. "model_uuid": "str", # Optional. Identifier for the foundation model. "name": "str", # Optional. Agent name. "open_ai_key_uuid": "str", # Optional. Optional OpenAI key uuid for use with @@ -235299,6 +242815,7 @@ def update_agent( "project_id": "str", # Optional. The id of the DigitalOcean project this agent will belong to. "provide_citations": bool, # Optional. + "reasoning_effort": "str", # Optional. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The retrieval method is unknown * RETRIEVAL_METHOD_REWRITE: The retrieval method is rewrite * @@ -235308,12 +242825,14 @@ def update_agent( "RETRIEVAL_METHOD_UNKNOWN", "RETRIEVAL_METHOD_REWRITE", "RETRIEVAL_METHOD_STEP_BACK", "RETRIEVAL_METHOD_SUB_QUERIES", and "RETRIEVAL_METHOD_NONE". + "router_preset_slug": "str", # Optional. "tags": [ "str" # Optional. A set of abitrary tags to organize your agent. ], "temperature": 0.0, # Optional. Controls the model"u2019s creativity, specified as a number between 0 and 1. Lower values produce more predictable and conservative responses, while higher values encourage creativity and variation. + "thinking_token_budget": 0, # Optional. "top_p": 0.0, # Optional. Defines the cumulative probability threshold for word selection, specified as a number between 0 and 1. 
Higher values allow for more diverse outputs, while lower values ensure focused and coherent responses. @@ -235557,6 +243076,12 @@ def update_agent( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -235583,6 +243108,24 @@ def update_agent( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -235591,8 +243134,27 @@ def update_agent( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. 
Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -235604,14 +243166,56 @@ def update_agent( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). 
"updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -235646,8 +243250,27 @@ def update_agent( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -235661,16 +243284,64 @@ def update_agent( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. 
String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -235699,6 +243370,44 @@ def update_agent( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. 
"openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -235718,8 +243427,27 @@ def update_agent( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -235733,16 +243461,64 @@ def update_agent( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). 
+ "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -235774,6 +243550,8 @@ def update_agent( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -235918,6 +243696,12 @@ def update_agent( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -235945,8 +243729,27 @@ def update_agent( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. 
High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -235959,14 +243762,59 @@ def update_agent( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. 
"max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -236006,6 +243854,8 @@ def update_agent( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -236045,6 +243895,13 @@ def update_agent( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -236075,6 +243932,15 @@ def update_agent( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. 
Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -236088,7 +243954,8 @@ def update_agent( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -236496,6 +244363,12 @@ def delete_agent(self, uuid: str, **kwargs: Any) -> JSON: "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -236522,6 +244395,24 @@ def delete_agent(self, uuid: str, **kwargs: Any) -> JSON: "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -236530,8 +244421,27 @@ def delete_agent(self, uuid: str, **kwargs: Any) -> JSON: "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. 
}, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -236543,14 +244453,56 @@ def delete_agent(self, uuid: str, **kwargs: Any) -> JSON: of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. 
Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -236585,8 +244537,27 @@ def delete_agent(self, uuid: str, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -236600,16 +244571,64 @@ def delete_agent(self, uuid: str, **kwargs: Any) -> JSON: "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. 
Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -236638,6 +244657,44 @@ def delete_agent(self, uuid: str, **kwargs: Any) -> JSON: "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. 
+ ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -236657,8 +244714,27 @@ def delete_agent(self, uuid: str, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -236672,16 +244748,64 @@ def delete_agent(self, uuid: str, **kwargs: Any) -> JSON: "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. 
+ Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -236713,6 +244837,8 @@ def delete_agent(self, uuid: str, **kwargs: Any) -> JSON: "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. 
Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -236857,6 +244983,12 @@ def delete_agent(self, uuid: str, **kwargs: Any) -> JSON: "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -236884,8 +245016,27 @@ def delete_agent(self, uuid: str, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -236898,14 +245049,59 @@ def delete_agent(self, uuid: str, **kwargs: Any) -> JSON: size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. 
Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -236945,6 +245141,8 @@ def delete_agent(self, uuid: str, **kwargs: Any) -> JSON: template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. 
An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -236984,6 +245182,13 @@ def delete_agent(self, uuid: str, **kwargs: Any) -> JSON: # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -237014,6 +245219,15 @@ def delete_agent(self, uuid: str, **kwargs: Any) -> JSON: "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -237027,7 +245241,8 @@ def delete_agent(self, uuid: str, **kwargs: Any) -> JSON: "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -237440,6 +245655,12 @@ def get_agent_children(self, uuid: str, **kwargs: Any) -> JSON: "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. 
+ "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -237467,6 +245688,24 @@ def get_agent_children(self, uuid: str, **kwargs: Any) -> JSON: stream. }, "max_tokens": 0, # Optional. Child agents. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of + allowed tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional + additional headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of + the MCP server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -237478,8 +245717,27 @@ def get_agent_children(self, uuid: str, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -237492,14 +245750,59 @@ def get_agent_children(self, uuid: str, **kwargs: Any) -> JSON: size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. 
+ "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -237537,8 +245840,27 @@ def get_agent_children(self, uuid: str, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. 
}, + "benchmark_score": {}, # Optional. + Benchmark scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. + Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # + Optional. Capabilities supported by this endpoint + (e.g. input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # @@ -237552,10 +245874,25 @@ def get_agent_children(self, uuid: str, **kwargs: Any) -> JSON: "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -237563,6 +245900,42 @@ def get_agent_children(self, uuid: str, **kwargs: Any) -> JSON: "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": + "str", # Optional. String default value (for + type="dropdown", e.g. "medium"). + "default_value": 0.0, + # Optional. Numeric default value (for + type="number"). + "max": 0.0, # + Optional. 
Maximum allowed value (for type="number"). + "min": 0.0, # + Optional. Minimum allowed value (for type="number"). + "name": "str", # + Optional. Setting key name (e.g. "max_tokens", + "temperature", "resolution"). + "options": [ + "str" # + Optional. Allowed values for dropdown selections + (for type="dropdown"). + ], + "step": 0.0, # + Optional. Step increment for numeric settings (for + type="number"). + "type": "str" # + Optional. Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. + Whether this model supports extended thinking (Anthropic + models). + "type": "str", # Optional. Model + type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. @@ -237593,6 +245966,45 @@ def get_agent_children(self, uuid: str, **kwargs: Any) -> JSON: "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level + fallback models. + ], + "policies": [ + { + "custom_task": { + "description": "str", + # Optional. Short task description. + "name": "str" # + Optional. Task name. + }, + "models": [ + "str" # Optional. + Models assigned to the task. + ], + "selection_policy": { + "prefer": "str" # + Optional. One of: none, cheapest, fastest. + }, + "task_slug": "str" # + Optional. Task slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. + Creation date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the + router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. + Last modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. 
Key @@ -237613,8 +246025,27 @@ def get_agent_children(self, uuid: str, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. + Benchmark scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. + Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # + Optional. Capabilities supported by this endpoint + (e.g. input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # @@ -237628,10 +246059,25 @@ def get_agent_children(self, uuid: str, **kwargs: Any) -> JSON: "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -237639,6 +246085,42 @@ def get_agent_children(self, uuid: str, **kwargs: Any) -> JSON: "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": + "str", # Optional. 
String default value (for + type="dropdown", e.g. "medium"). + "default_value": 0.0, + # Optional. Numeric default value (for + type="number"). + "max": 0.0, # + Optional. Maximum allowed value (for type="number"). + "min": 0.0, # + Optional. Minimum allowed value (for type="number"). + "name": "str", # + Optional. Setting key name (e.g. "max_tokens", + "temperature", "resolution"). + "options": [ + "str" # + Optional. Allowed values for dropdown selections + (for type="dropdown"). + ], + "step": 0.0, # + Optional. Step increment for numeric settings (for + type="number"). + "type": "str" # + Optional. Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. + Whether this model supports extended thinking (Anthropic + models). + "type": "str", # Optional. Model + type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. @@ -237672,6 +246154,8 @@ def get_agent_children(self, uuid: str, **kwargs: Any) -> JSON: "project_id": "str", # Optional. Child agents. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort + for the agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: @@ -237819,6 +246303,12 @@ def get_agent_children(self, uuid: str, **kwargs: Any) -> JSON: List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. + Whether reranking is enabled for retrieval. + "model": "str" # Optional. + Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -237846,8 +246336,27 @@ def get_agent_children(self, uuid: str, **kwargs: Any) -> JSON: "uuid": "str" # Optional. 
Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -237861,16 +246370,64 @@ def get_agent_children(self, uuid: str, **kwargs: Any) -> JSON: "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. 
+ Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -237914,6 +246471,8 @@ def get_agent_children(self, uuid: str, **kwargs: Any) -> JSON: agent template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token + budget for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. Child agents. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. @@ -237956,6 +246515,13 @@ def get_agent_children(self, uuid: str, **kwargs: Any) -> JSON: 00:00:00", # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default + value is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values + are: "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # @@ -237986,6 +246552,16 @@ def get_agent_children(self, uuid: str, **kwargs: Any) -> JSON: "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. 
Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent + evaluation or model evaluation. For backwards + compatibility, UNSPECIFIED defaults to agent metrics + only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", + "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -237999,8 +246575,10 @@ def get_agent_children(self, uuid: str, **kwargs: Any) -> JSON: "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", - "METRIC_TYPE_GENERAL_QUALITY", and - "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_GENERAL_QUALITY", + "METRIC_TYPE_RAG_AND_TOOL", + "METRIC_TYPE_MODEL_QUALITY", and + "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -238423,6 +247001,12 @@ def update_agent_deployment_visibility( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -238449,6 +247033,24 @@ def update_agent_deployment_visibility( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. 
The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -238457,8 +247059,27 @@ def update_agent_deployment_visibility( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -238470,14 +247091,56 @@ def update_agent_deployment_visibility( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. 
Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -238512,8 +247175,27 @@ def update_agent_deployment_visibility( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -238527,16 +247209,64 @@ def update_agent_deployment_visibility( "kb_min_chunk_size": 0, # Optional. 
Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. 
Model @@ -238565,6 +247295,44 @@ def update_agent_deployment_visibility( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -238584,8 +247352,27 @@ def update_agent_deployment_visibility( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. 
@@ -238599,16 +247386,64 @@ def update_agent_deployment_visibility( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. 
"upload_complete": bool, # Optional. Model @@ -238640,6 +247475,8 @@ def update_agent_deployment_visibility( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -238784,6 +247621,12 @@ def update_agent_deployment_visibility( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -238811,8 +247654,27 @@ def update_agent_deployment_visibility( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -238825,14 +247687,59 @@ def update_agent_deployment_visibility( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. 
+ "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -238872,6 +247779,8 @@ def update_agent_deployment_visibility( template's last updated date. "uuid": "str" # Optional. Unique id. 
}, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -238911,6 +247820,13 @@ def update_agent_deployment_visibility( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -238941,6 +247857,15 @@ def update_agent_deployment_visibility( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -238954,7 +247879,8 @@ def update_agent_deployment_visibility( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -239281,6 +248207,12 @@ def update_agent_deployment_visibility( "name": "str", # Optional. 
Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -239307,6 +248239,24 @@ def update_agent_deployment_visibility( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -239315,8 +248265,27 @@ def update_agent_deployment_visibility( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. 
@@ -239328,14 +248297,56 @@ def update_agent_deployment_visibility( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. 
"upload_complete": bool, # Optional. Model has been fully @@ -239370,8 +248381,27 @@ def update_agent_deployment_visibility( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -239385,16 +248415,64 @@ def update_agent_deployment_visibility( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. 
+ "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -239423,6 +248501,44 @@ def update_agent_deployment_visibility( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. 
Key @@ -239442,8 +248558,27 @@ def update_agent_deployment_visibility( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -239457,16 +248592,64 @@ def update_agent_deployment_visibility( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. 
Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -239498,6 +248681,8 @@ def update_agent_deployment_visibility( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -239642,6 +248827,12 @@ def update_agent_deployment_visibility( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -239669,8 +248860,27 @@ def update_agent_deployment_visibility( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. 
+ "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -239683,14 +248893,59 @@ def update_agent_deployment_visibility( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). 
+ "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -239730,6 +248985,8 @@ def update_agent_deployment_visibility( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. @@ -239769,6 +249026,13 @@ def update_agent_deployment_visibility( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -239799,6 +249063,15 @@ def update_agent_deployment_visibility( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. 
For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -239812,7 +249085,8 @@ def update_agent_deployment_visibility( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -240145,6 +249419,12 @@ def update_agent_deployment_visibility( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker model + internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -240171,6 +249451,24 @@ def update_agent_deployment_visibility( "log_stream_name": "str" # Optional. Name of the log stream. }, "max_tokens": 0, # Optional. An Agent. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of allowed + tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional additional + headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of the MCP + server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -240179,8 +249477,27 @@ def update_agent_deployment_visibility( "url": "str", # Optional. 
Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window size in + tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally used version. @@ -240192,14 +249509,56 @@ def update_agent_deployment_visibility( of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. 
Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -240234,8 +249593,27 @@ def update_agent_deployment_visibility( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -240249,16 +249627,64 @@ def update_agent_deployment_visibility( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). 
"metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -240287,6 +249713,44 @@ def update_agent_deployment_visibility( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level fallback + models. 
+ ], + "policies": [ + { + "custom_task": { + "description": "str", # + Optional. Short task description. + "name": "str" # Optional. + Task name. + }, + "models": [ + "str" # Optional. Models + assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. + One of: none, cheapest, fastest. + }, + "task_slug": "str" # Optional. Task + slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -240306,8 +249770,27 @@ def update_agent_deployment_visibility( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -240321,16 +249804,64 @@ def update_agent_deployment_visibility( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). 
"metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -240362,6 +249893,8 @@ def update_agent_deployment_visibility( "project_id": "str", # Optional. An Agent. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. 
The reasoning effort for the + agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: The @@ -240506,6 +250039,12 @@ def update_agent_deployment_visibility( "project_id": "str", # Optional. List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -240533,8 +250072,27 @@ def update_agent_deployment_visibility( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -240547,14 +250105,59 @@ def update_agent_deployment_visibility( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. 
Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -240594,6 +250197,8 @@ def update_agent_deployment_visibility( template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token budget + for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. An Agent. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "url": "str", # Optional. Access your agent under this url. 
@@ -240633,6 +250238,13 @@ def update_agent_deployment_visibility( # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -240663,6 +250275,15 @@ def update_agent_deployment_visibility( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -240676,7 +250297,8 @@ def update_agent_deployment_visibility( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -242669,6 +252291,12 @@ def list_agents_by_anthropic_key( "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. 
@@ -242696,6 +252324,24 @@ def list_agents_by_anthropic_key( stream. }, "max_tokens": 0, # Optional. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of + allowed tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional + additional headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of + the MCP server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -242707,8 +252353,27 @@ def list_agents_by_anthropic_key( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -242721,14 +252386,59 @@ def list_agents_by_anthropic_key( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. 
+ ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -242766,8 +252476,27 @@ def list_agents_by_anthropic_key( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. + Benchmark scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. + Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. 
+ "endpoints": [ + { + "capabilities": [ + "str" # + Optional. Capabilities supported by this endpoint + (e.g. input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # @@ -242781,10 +252510,25 @@ def list_agents_by_anthropic_key( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -242792,6 +252536,42 @@ def list_agents_by_anthropic_key( "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": + "str", # Optional. String default value (for + type="dropdown", e.g. "medium"). + "default_value": 0.0, + # Optional. Numeric default value (for + type="number"). + "max": 0.0, # + Optional. Maximum allowed value (for type="number"). + "min": 0.0, # + Optional. Minimum allowed value (for type="number"). + "name": "str", # + Optional. Setting key name (e.g. "max_tokens", + "temperature", "resolution"). + "options": [ + "str" # + Optional. Allowed values for dropdown selections + (for type="dropdown"). + ], + "step": 0.0, # + Optional. Step increment for numeric settings (for + type="number"). 
+ "type": "str" # + Optional. Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. + Whether this model supports extended thinking (Anthropic + models). + "type": "str", # Optional. Model + type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. @@ -242822,6 +252602,45 @@ def list_agents_by_anthropic_key( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level + fallback models. + ], + "policies": [ + { + "custom_task": { + "description": "str", + # Optional. Short task description. + "name": "str" # + Optional. Task name. + }, + "models": [ + "str" # Optional. + Models assigned to the task. + ], + "selection_policy": { + "prefer": "str" # + Optional. One of: none, cheapest, fastest. + }, + "task_slug": "str" # + Optional. Task slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. + Creation date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the + router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. + Last modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -242842,8 +252661,27 @@ def list_agents_by_anthropic_key( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. + Benchmark scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. + Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # + Optional. 
Capabilities supported by this endpoint + (e.g. input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # @@ -242857,10 +252695,25 @@ def list_agents_by_anthropic_key( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -242868,6 +252721,42 @@ def list_agents_by_anthropic_key( "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": + "str", # Optional. String default value (for + type="dropdown", e.g. "medium"). + "default_value": 0.0, + # Optional. Numeric default value (for + type="number"). + "max": 0.0, # + Optional. Maximum allowed value (for type="number"). + "min": 0.0, # + Optional. Minimum allowed value (for type="number"). + "name": "str", # + Optional. Setting key name (e.g. "max_tokens", + "temperature", "resolution"). + "options": [ + "str" # + Optional. Allowed values for dropdown selections + (for type="dropdown"). + ], + "step": 0.0, # + Optional. Step increment for numeric settings (for + type="number"). + "type": "str" # + Optional. Setting value type: "number" or "dropdown". 
+ } + ], + "thinking": bool, # Optional. + Whether this model supports extended thinking (Anthropic + models). + "type": "str", # Optional. Model + type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. @@ -242901,6 +252790,8 @@ def list_agents_by_anthropic_key( "project_id": "str", # Optional. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort + for the agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: @@ -243048,6 +252939,12 @@ def list_agents_by_anthropic_key( List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. + Whether reranking is enabled for retrieval. + "model": "str" # Optional. + Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -243075,8 +252972,27 @@ def list_agents_by_anthropic_key( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. 
@@ -243090,16 +253006,64 @@ def list_agents_by_anthropic_key( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. 
"upload_complete": bool, # Optional. Model @@ -243143,6 +253107,8 @@ def list_agents_by_anthropic_key( agent template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token + budget for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. @@ -243185,6 +253151,13 @@ def list_agents_by_anthropic_key( 00:00:00", # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default + value is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values + are: "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # @@ -243215,6 +253188,16 @@ def list_agents_by_anthropic_key( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent + evaluation or model evaluation. For backwards + compatibility, UNSPECIFIED defaults to agent metrics + only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", + "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -243228,8 +253211,10 @@ def list_agents_by_anthropic_key( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", - "METRIC_TYPE_GENERAL_QUALITY", and - "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_GENERAL_QUALITY", + "METRIC_TYPE_RAG_AND_TOOL", + "METRIC_TYPE_MODEL_QUALITY", and + "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. 
"metric_value_type": @@ -243387,81 +253372,28 @@ def list_agents_by_anthropic_key( return cast(JSON, deserialized) # type: ignore - @overload - def create_evaluation_dataset( - self, - body: Optional[JSON] = None, - *, - content_type: str = "application/json", - **kwargs: Any, - ) -> JSON: - # pylint: disable=line-too-long - """Create Evaluation Dataset. - - To create an evaluation dataset, send a POST request to ``/v2/gen-ai/evaluation_datasets``. - - :param body: Default value is None. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "dataset_type": "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value - is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: - "EVALUATION_DATASET_TYPE_UNKNOWN", "EVALUATION_DATASET_TYPE_ADK", and - "EVALUATION_DATASET_TYPE_NON_ADK". - "file_upload_dataset": { - "original_file_name": "str", # Optional. The original file name. - "size_in_bytes": "str", # Optional. The size of the file in bytes. - "stored_object_key": "str" # Optional. The object key the file was - stored as. - }, - "name": "str" # Optional. The name of the agent evaluation dataset. - } - - # response body for status code(s): 200 - response == { - "evaluation_dataset_uuid": "str" # Optional. Evaluation dataset uuid. - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. 
- "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @overload - def create_evaluation_dataset( + @distributed_trace + def list_custom_models( self, - body: Optional[IO[bytes]] = None, *, - content_type: str = "application/json", + page: Optional[int] = None, + per_page: Optional[int] = None, + status: str = "STATUS_UNSPECIFIED", **kwargs: Any, ) -> JSON: # pylint: disable=line-too-long - """Create Evaluation Dataset. + """List Custom Models. - To create an evaluation dataset, send a POST request to ``/v2/gen-ai/evaluation_datasets``. + To list custom models, send a GET request to ``/v2/gen-ai/custom_models``. - :param body: Default value is None. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str + :keyword page: Page number for pagination. Default value is None. + :paramtype page: int + :keyword per_page: Number of items per page. Default value is None. + :paramtype per_page: int + :keyword status: Filter by model status. Known values are: "STATUS_UNSPECIFIED", + "STATUS_IMPORTING", "STATUS_READY", "STATUS_FAILED", and "STATUS_DELETED". Default value is + "STATUS_UNSPECIFIED". + :paramtype status: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -243471,57 +253403,118 @@ def create_evaluation_dataset( # response body for status code(s): 200 response == { - "evaluation_dataset_uuid": "str" # Optional. Evaluation dataset uuid. - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. 
- "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @distributed_trace - def create_evaluation_dataset( - self, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any - ) -> JSON: - # pylint: disable=line-too-long - """Create Evaluation Dataset. - - To create an evaluation dataset, send a POST request to ``/v2/gen-ai/evaluation_datasets``. - - :param body: Is either a JSON type or a IO[bytes] type. Default value is None. - :type body: JSON or IO[bytes] - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "dataset_type": "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value - is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: - "EVALUATION_DATASET_TYPE_UNKNOWN", "EVALUATION_DATASET_TYPE_ADK", and - "EVALUATION_DATASET_TYPE_NON_ADK". - "file_upload_dataset": { - "original_file_name": "str", # Optional. The original file name. - "size_in_bytes": "str", # Optional. The size of the file in bytes. - "stored_object_key": "str" # Optional. The object key the file was - stored as. + "links": { + "pages": { + "first": "str", # Optional. First page. + "last": "str", # Optional. Last page. + "next": "str", # Optional. Next page. + "previous": "str" # Optional. Previous page. + } }, - "name": "str" # Optional. The name of the agent evaluation dataset. - } - - # response body for status code(s): 200 - response == { - "evaluation_dataset_uuid": "str" # Optional. Evaluation dataset uuid. + "max_threshold": 0, # Optional. Maximum number of custom models allowed for + this team's tier. 
+ "meta": { + "page": 0, # Optional. The current page. + "pages": 0, # Optional. Total number of pages. + "total": 0 # Optional. Total amount of items over all pages. + }, + "models": [ + { + "active_deployments": [ + { + "created_at": "str", # Optional. RFC 3339 + timestamp indicating when the dedicated inference deployment was + created. + "endpoints": { + "private_endpoint_fqdn": "str", # + Optional. Private FQDN for the deployment. + "public_endpoint_fqdn": "str" # + Optional. Public FQDN for the deployment. + }, + "id": "str", # Optional. Unique identifier + (UUID) of the dedicated inference deployment. + "name": "str", # Optional. Human-readable + name of the dedicated inference deployment. + "region_slug": "str", # Optional. Slug of + the region where the dedicated inference deployment is running + (e.g. "atl1"). + "state": "str", # Optional. Current + lifecycle state of the dedicated inference deployment (e.g. + "ACTIVE", "PROVISIONING"). + "updated_at": "str" # Optional. RFC 3339 + timestamp indicating when the dedicated inference deployment was + last updated. + } + ], + "architecture": "str", # Optional. Model architecture type + (free-form string from config.json). + "config_json": {}, # Optional. Raw config.json contents from + the model repository. + "context_length": 0, # Optional. Maximum context length + supported by the model. + "cost_estimate_per_month": 0, # Optional. Estimated monthly + cost in dollars for hosting. + "created_at": "2020-02-20 00:00:00", # Optional. Timestamp + when the model was created. + "description": "str", # Optional. Description of the custom + model. + "file_count": 0, # Optional. Number of files in the model. + "input_modalities": [ + "str" # Optional. Input modalities supported (e.g., + text, image). + ], + "license": "str", # Optional. License under which the model + is distributed. + "name": "str", # Optional. Name of the custom model. + "output_modalities": [ + "str" # Optional. 
Output modalities supported (e.g., + text, image). + ], + "parameters": "str", # Optional. Number of parameters in the + model. + "source_ref": { + "access_type": "ACCESS_TYPE_UNSPECIFIED", # + Optional. Default value is "ACCESS_TYPE_UNSPECIFIED". Access level + required for the model repository. Known values are: + "ACCESS_TYPE_UNSPECIFIED", "ACCESS_TYPE_PUBLIC", + "ACCESS_TYPE_PRIVATE", and "ACCESS_TYPE_GATED". + "bucket": "str", # Optional. Spaces bucket name. + "commit_sha": "str", # Optional. Git commit SHA of + the model version. + "hf_token": "str", # Optional. User-provided + HuggingFace token for gated/private models (not persisted in + source_ref). + "prefix": "str", # Optional. Object prefix path in + the bucket. + "region": "str", # Optional. Spaces bucket region. + "repo_id": "str" # Optional. Huggingface repository + identifier. + }, + "source_type": "SOURCE_TYPE_UNSPECIFIED", # Optional. + Default value is "SOURCE_TYPE_UNSPECIFIED". Source from which the model + was imported. Known values are: "SOURCE_TYPE_UNSPECIFIED", + "SOURCE_TYPE_HUGGINGFACE", "SOURCE_TYPE_SPACES_BUCKET", + "SOURCE_TYPE_SDK_UPLOAD", and "SOURCE_TYPE_FINE_TUNING". + "status": "STATUS_UNSPECIFIED", # Optional. Default value is + "STATUS_UNSPECIFIED". Import and deployment status of the custom model. + Known values are: "STATUS_UNSPECIFIED", "STATUS_IMPORTING", + "STATUS_READY", "STATUS_FAILED", and "STATUS_DELETED". + "storage_region": "str", # Optional. Region of the Spaces + bucket where model files are stored. + "tags": { + "tags": [ + "str" # Optional. List of tag strings. + ] + }, + "team_id": "str", # Optional. Team that owns the model. + "total_size_bytes": "str", # Optional. Total size of model + files in bytes. + "updated_at": "2020-02-20 00:00:00", # Optional. Timestamp + when the model was last updated. + "uuid": "str" # Optional. Unique identifier for the custom + model. 
+ } + ] } # response body for status code(s): 404 response == { @@ -243548,29 +253541,15 @@ def create_evaluation_dataset( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) cls: ClsType[JSON] = kwargs.pop("cls", None) - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - if body is not None: - _json = body - else: - _json = None - - _request = build_genai_create_evaluation_dataset_request( - content_type=content_type, - json=_json, - content=_content, + _request = build_genai_list_custom_models_request( + page=page, + per_page=per_page, + status=status, headers=_headers, params=_params, ) @@ -243630,7 +253609,7 @@ def create_evaluation_dataset( return cast(JSON, deserialized) # type: ignore @overload - def create_evaluation_dataset_file_upload_presigned_urls( # pylint: disable=name-too-long + def import_custom_model( self, body: Optional[JSON] = None, *, @@ -243638,10 +253617,9 @@ def create_evaluation_dataset_file_upload_presigned_urls( # pylint: disable=nam **kwargs: Any, ) -> JSON: # pylint: disable=line-too-long - """Create Presigned URLs for Evaluation Dataset File Upload. + """Import Custom Model. - To create presigned URLs for evaluation dataset file upload, send a POST request to - ``/v2/gen-ai/evaluation_datasets/file_upload_presigned_urls``. + To import a custom model, send a POST request to ``/v2/gen-ai/custom_models/import``. :param body: Default value is None. :type body: JSON @@ -243657,29 +253635,151 @@ def create_evaluation_dataset_file_upload_presigned_urls( # pylint: disable=nam # JSON input template you can fill out and use as your body input. 
body = { - "files": [ - { - "file_name": "str", # Optional. Local filename. - "file_size": "str" # Optional. The size of the file in - bytes. - } - ] + "accept_terms_and_conditions": bool, # Optional. Whether the caller accepts + the terms and conditions for importing this model. + "description": "str", # Optional. Description of the model. + "name": "str", # Optional. Name for the imported model. + "preferred_gpu_region": "str", # Optional. Preferred GPU region for + deployment. + "source_ref": { + "access_type": "ACCESS_TYPE_UNSPECIFIED", # Optional. Default value + is "ACCESS_TYPE_UNSPECIFIED". Access level required for the model repository. + Known values are: "ACCESS_TYPE_UNSPECIFIED", "ACCESS_TYPE_PUBLIC", + "ACCESS_TYPE_PRIVATE", and "ACCESS_TYPE_GATED". + "bucket": "str", # Optional. Spaces bucket name. + "commit_sha": "str", # Optional. Git commit SHA of the model + version. + "hf_token": "str", # Optional. User-provided HuggingFace token for + gated/private models (not persisted in source_ref). + "prefix": "str", # Optional. Object prefix path in the bucket. + "region": "str", # Optional. Spaces bucket region. + "repo_id": "str" # Optional. Huggingface repository identifier. + }, + "source_type": "SOURCE_TYPE_UNSPECIFIED", # Optional. Default value is + "SOURCE_TYPE_UNSPECIFIED". Source from which the model was imported. Known values + are: "SOURCE_TYPE_UNSPECIFIED", "SOURCE_TYPE_HUGGINGFACE", + "SOURCE_TYPE_SPACES_BUCKET", "SOURCE_TYPE_SDK_UPLOAD", and + "SOURCE_TYPE_FINE_TUNING". + "tags": { + "tags": [ + "str" # Optional. List of tag strings. + ] + } } # response body for status code(s): 200 response == { - "request_id": "str", # Optional. The ID generated for the request for - Presigned URLs. - "uploads": [ + "error": "str", # Optional. + "import_job": { + "bytes_done": "str", # Optional. Bytes imported so far. + "bytes_total": "str", # Optional. Total bytes to import. + "completed_at": "2020-02-20 00:00:00", # Optional. 
Timestamp when + the import completed. + "created_at": "2020-02-20 00:00:00", # Optional. Timestamp when the + job was created. + "error_message": "str", # Optional. Error message if import failed. + "error_step": "str", # Optional. Step at which the error occurred. + "files_done": 0, # Optional. Number of files imported so far. + "files_total": 0, # Optional. Total number of files to import. + "started_at": "2020-02-20 00:00:00", # Optional. Timestamp when the + import started. + "status": "str", # Optional. Current status of the import job. + "uuid": "str" # Optional. Unique identifier for the import job. + }, + "model": { + "active_deployments": [ + { + "created_at": "str", # Optional. RFC 3339 timestamp + indicating when the dedicated inference deployment was created. + "endpoints": { + "private_endpoint_fqdn": "str", # Optional. + Private FQDN for the deployment. + "public_endpoint_fqdn": "str" # Optional. + Public FQDN for the deployment. + }, + "id": "str", # Optional. Unique identifier (UUID) of + the dedicated inference deployment. + "name": "str", # Optional. Human-readable name of + the dedicated inference deployment. + "region_slug": "str", # Optional. Slug of the region + where the dedicated inference deployment is running (e.g. "atl1"). + "state": "str", # Optional. Current lifecycle state + of the dedicated inference deployment (e.g. "ACTIVE", + "PROVISIONING"). + "updated_at": "str" # Optional. RFC 3339 timestamp + indicating when the dedicated inference deployment was last updated. + } + ], + "architecture": "str", # Optional. Model architecture type + (free-form string from config.json). + "config_json": {}, # Optional. Raw config.json contents from the + model repository. + "context_length": 0, # Optional. Maximum context length supported by + the model. + "cost_estimate_per_month": 0, # Optional. Estimated monthly cost in + dollars for hosting. + "created_at": "2020-02-20 00:00:00", # Optional. Timestamp when the + model was created. 
+ "description": "str", # Optional. Description of the custom model. + "file_count": 0, # Optional. Number of files in the model. + "input_modalities": [ + "str" # Optional. Input modalities supported (e.g., text, + image). + ], + "license": "str", # Optional. License under which the model is + distributed. + "name": "str", # Optional. Name of the custom model. + "output_modalities": [ + "str" # Optional. Output modalities supported (e.g., text, + image). + ], + "parameters": "str", # Optional. Number of parameters in the model. + "source_ref": { + "access_type": "ACCESS_TYPE_UNSPECIFIED", # Optional. + Default value is "ACCESS_TYPE_UNSPECIFIED". Access level required for the + model repository. Known values are: "ACCESS_TYPE_UNSPECIFIED", + "ACCESS_TYPE_PUBLIC", "ACCESS_TYPE_PRIVATE", and "ACCESS_TYPE_GATED". + "bucket": "str", # Optional. Spaces bucket name. + "commit_sha": "str", # Optional. Git commit SHA of the model + version. + "hf_token": "str", # Optional. User-provided HuggingFace + token for gated/private models (not persisted in source_ref). + "prefix": "str", # Optional. Object prefix path in the + bucket. + "region": "str", # Optional. Spaces bucket region. + "repo_id": "str" # Optional. Huggingface repository + identifier. + }, + "source_type": "SOURCE_TYPE_UNSPECIFIED", # Optional. Default value + is "SOURCE_TYPE_UNSPECIFIED". Source from which the model was imported. Known + values are: "SOURCE_TYPE_UNSPECIFIED", "SOURCE_TYPE_HUGGINGFACE", + "SOURCE_TYPE_SPACES_BUCKET", "SOURCE_TYPE_SDK_UPLOAD", and + "SOURCE_TYPE_FINE_TUNING". + "status": "STATUS_UNSPECIFIED", # Optional. Default value is + "STATUS_UNSPECIFIED". Import and deployment status of the custom model. Known + values are: "STATUS_UNSPECIFIED", "STATUS_IMPORTING", "STATUS_READY", + "STATUS_FAILED", and "STATUS_DELETED". + "storage_region": "str", # Optional. Region of the Spaces bucket + where model files are stored. + "tags": { + "tags": [ + "str" # Optional. List of tag strings. 
+ ] + }, + "team_id": "str", # Optional. Team that owns the model. + "total_size_bytes": "str", # Optional. Total size of model files in + bytes. + "updated_at": "2020-02-20 00:00:00", # Optional. Timestamp when the + model was last updated. + "uuid": "str" # Optional. Unique identifier for the custom model. + }, + "validation_steps": [ { - "expires_at": "2020-02-20 00:00:00", # Optional. The time - the url expires at. - "object_key": "str", # Optional. The unique object key to - store the file as. - "original_file_name": "str", # Optional. The original file - name. - "presigned_url": "str" # Optional. The actual presigned URL - the client can use to upload the file directly. + "error": "str", # Optional. Error message if validation + failed. + "name": "str", # Optional. Name of the validation step. + "passed": bool # Optional. Whether the validation step + passed. } ] } @@ -243697,7 +253797,7 @@ def create_evaluation_dataset_file_upload_presigned_urls( # pylint: disable=nam """ @overload - def create_evaluation_dataset_file_upload_presigned_urls( # pylint: disable=name-too-long + def import_custom_model( self, body: Optional[IO[bytes]] = None, *, @@ -243705,10 +253805,9 @@ def create_evaluation_dataset_file_upload_presigned_urls( # pylint: disable=nam **kwargs: Any, ) -> JSON: # pylint: disable=line-too-long - """Create Presigned URLs for Evaluation Dataset File Upload. + """Import Custom Model. - To create presigned URLs for evaluation dataset file upload, send a POST request to - ``/v2/gen-ai/evaluation_datasets/file_upload_presigned_urls``. + To import a custom model, send a POST request to ``/v2/gen-ai/custom_models/import``. :param body: Default value is None. :type body: IO[bytes] @@ -243724,18 +253823,117 @@ def create_evaluation_dataset_file_upload_presigned_urls( # pylint: disable=nam # response body for status code(s): 200 response == { - "request_id": "str", # Optional. The ID generated for the request for - Presigned URLs. 
- "uploads": [ + "error": "str", # Optional. + "import_job": { + "bytes_done": "str", # Optional. Bytes imported so far. + "bytes_total": "str", # Optional. Total bytes to import. + "completed_at": "2020-02-20 00:00:00", # Optional. Timestamp when + the import completed. + "created_at": "2020-02-20 00:00:00", # Optional. Timestamp when the + job was created. + "error_message": "str", # Optional. Error message if import failed. + "error_step": "str", # Optional. Step at which the error occurred. + "files_done": 0, # Optional. Number of files imported so far. + "files_total": 0, # Optional. Total number of files to import. + "started_at": "2020-02-20 00:00:00", # Optional. Timestamp when the + import started. + "status": "str", # Optional. Current status of the import job. + "uuid": "str" # Optional. Unique identifier for the import job. + }, + "model": { + "active_deployments": [ + { + "created_at": "str", # Optional. RFC 3339 timestamp + indicating when the dedicated inference deployment was created. + "endpoints": { + "private_endpoint_fqdn": "str", # Optional. + Private FQDN for the deployment. + "public_endpoint_fqdn": "str" # Optional. + Public FQDN for the deployment. + }, + "id": "str", # Optional. Unique identifier (UUID) of + the dedicated inference deployment. + "name": "str", # Optional. Human-readable name of + the dedicated inference deployment. + "region_slug": "str", # Optional. Slug of the region + where the dedicated inference deployment is running (e.g. "atl1"). + "state": "str", # Optional. Current lifecycle state + of the dedicated inference deployment (e.g. "ACTIVE", + "PROVISIONING"). + "updated_at": "str" # Optional. RFC 3339 timestamp + indicating when the dedicated inference deployment was last updated. + } + ], + "architecture": "str", # Optional. Model architecture type + (free-form string from config.json). + "config_json": {}, # Optional. Raw config.json contents from the + model repository. + "context_length": 0, # Optional. 
Maximum context length supported by + the model. + "cost_estimate_per_month": 0, # Optional. Estimated monthly cost in + dollars for hosting. + "created_at": "2020-02-20 00:00:00", # Optional. Timestamp when the + model was created. + "description": "str", # Optional. Description of the custom model. + "file_count": 0, # Optional. Number of files in the model. + "input_modalities": [ + "str" # Optional. Input modalities supported (e.g., text, + image). + ], + "license": "str", # Optional. License under which the model is + distributed. + "name": "str", # Optional. Name of the custom model. + "output_modalities": [ + "str" # Optional. Output modalities supported (e.g., text, + image). + ], + "parameters": "str", # Optional. Number of parameters in the model. + "source_ref": { + "access_type": "ACCESS_TYPE_UNSPECIFIED", # Optional. + Default value is "ACCESS_TYPE_UNSPECIFIED". Access level required for the + model repository. Known values are: "ACCESS_TYPE_UNSPECIFIED", + "ACCESS_TYPE_PUBLIC", "ACCESS_TYPE_PRIVATE", and "ACCESS_TYPE_GATED". + "bucket": "str", # Optional. Spaces bucket name. + "commit_sha": "str", # Optional. Git commit SHA of the model + version. + "hf_token": "str", # Optional. User-provided HuggingFace + token for gated/private models (not persisted in source_ref). + "prefix": "str", # Optional. Object prefix path in the + bucket. + "region": "str", # Optional. Spaces bucket region. + "repo_id": "str" # Optional. Huggingface repository + identifier. + }, + "source_type": "SOURCE_TYPE_UNSPECIFIED", # Optional. Default value + is "SOURCE_TYPE_UNSPECIFIED". Source from which the model was imported. Known + values are: "SOURCE_TYPE_UNSPECIFIED", "SOURCE_TYPE_HUGGINGFACE", + "SOURCE_TYPE_SPACES_BUCKET", "SOURCE_TYPE_SDK_UPLOAD", and + "SOURCE_TYPE_FINE_TUNING". + "status": "STATUS_UNSPECIFIED", # Optional. Default value is + "STATUS_UNSPECIFIED". Import and deployment status of the custom model. 
Known + values are: "STATUS_UNSPECIFIED", "STATUS_IMPORTING", "STATUS_READY", + "STATUS_FAILED", and "STATUS_DELETED". + "storage_region": "str", # Optional. Region of the Spaces bucket + where model files are stored. + "tags": { + "tags": [ + "str" # Optional. List of tag strings. + ] + }, + "team_id": "str", # Optional. Team that owns the model. + "total_size_bytes": "str", # Optional. Total size of model files in + bytes. + "updated_at": "2020-02-20 00:00:00", # Optional. Timestamp when the + model was last updated. + "uuid": "str" # Optional. Unique identifier for the custom model. + }, + "validation_steps": [ { - "expires_at": "2020-02-20 00:00:00", # Optional. The time - the url expires at. - "object_key": "str", # Optional. The unique object key to - store the file as. - "original_file_name": "str", # Optional. The original file - name. - "presigned_url": "str" # Optional. The actual presigned URL - the client can use to upload the file directly. + "error": "str", # Optional. Error message if validation + failed. + "name": "str", # Optional. Name of the validation step. + "passed": bool # Optional. Whether the validation step + passed. } ] } @@ -243753,14 +253951,13 @@ def create_evaluation_dataset_file_upload_presigned_urls( # pylint: disable=nam """ @distributed_trace - def create_evaluation_dataset_file_upload_presigned_urls( # pylint: disable=name-too-long + def import_custom_model( self, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Create Presigned URLs for Evaluation Dataset File Upload. + """Import Custom Model. - To create presigned URLs for evaluation dataset file upload, send a POST request to - ``/v2/gen-ai/evaluation_datasets/file_upload_presigned_urls``. + To import a custom model, send a POST request to ``/v2/gen-ai/custom_models/import``. :param body: Is either a JSON type or a IO[bytes] type. Default value is None. 
:type body: JSON or IO[bytes] @@ -243773,29 +253970,1289 @@ def create_evaluation_dataset_file_upload_presigned_urls( # pylint: disable=nam # JSON input template you can fill out and use as your body input. body = { - "files": [ - { - "file_name": "str", # Optional. Local filename. - "file_size": "str" # Optional. The size of the file in - bytes. - } - ] + "accept_terms_and_conditions": bool, # Optional. Whether the caller accepts + the terms and conditions for importing this model. + "description": "str", # Optional. Description of the model. + "name": "str", # Optional. Name for the imported model. + "preferred_gpu_region": "str", # Optional. Preferred GPU region for + deployment. + "source_ref": { + "access_type": "ACCESS_TYPE_UNSPECIFIED", # Optional. Default value + is "ACCESS_TYPE_UNSPECIFIED". Access level required for the model repository. + Known values are: "ACCESS_TYPE_UNSPECIFIED", "ACCESS_TYPE_PUBLIC", + "ACCESS_TYPE_PRIVATE", and "ACCESS_TYPE_GATED". + "bucket": "str", # Optional. Spaces bucket name. + "commit_sha": "str", # Optional. Git commit SHA of the model + version. + "hf_token": "str", # Optional. User-provided HuggingFace token for + gated/private models (not persisted in source_ref). + "prefix": "str", # Optional. Object prefix path in the bucket. + "region": "str", # Optional. Spaces bucket region. + "repo_id": "str" # Optional. Huggingface repository identifier. + }, + "source_type": "SOURCE_TYPE_UNSPECIFIED", # Optional. Default value is + "SOURCE_TYPE_UNSPECIFIED". Source from which the model was imported. Known values + are: "SOURCE_TYPE_UNSPECIFIED", "SOURCE_TYPE_HUGGINGFACE", + "SOURCE_TYPE_SPACES_BUCKET", "SOURCE_TYPE_SDK_UPLOAD", and + "SOURCE_TYPE_FINE_TUNING". + "tags": { + "tags": [ + "str" # Optional. List of tag strings. + ] + } } # response body for status code(s): 200 response == { - "request_id": "str", # Optional. The ID generated for the request for - Presigned URLs. - "uploads": [ + "error": "str", # Optional. 
+ "import_job": { + "bytes_done": "str", # Optional. Bytes imported so far. + "bytes_total": "str", # Optional. Total bytes to import. + "completed_at": "2020-02-20 00:00:00", # Optional. Timestamp when + the import completed. + "created_at": "2020-02-20 00:00:00", # Optional. Timestamp when the + job was created. + "error_message": "str", # Optional. Error message if import failed. + "error_step": "str", # Optional. Step at which the error occurred. + "files_done": 0, # Optional. Number of files imported so far. + "files_total": 0, # Optional. Total number of files to import. + "started_at": "2020-02-20 00:00:00", # Optional. Timestamp when the + import started. + "status": "str", # Optional. Current status of the import job. + "uuid": "str" # Optional. Unique identifier for the import job. + }, + "model": { + "active_deployments": [ + { + "created_at": "str", # Optional. RFC 3339 timestamp + indicating when the dedicated inference deployment was created. + "endpoints": { + "private_endpoint_fqdn": "str", # Optional. + Private FQDN for the deployment. + "public_endpoint_fqdn": "str" # Optional. + Public FQDN for the deployment. + }, + "id": "str", # Optional. Unique identifier (UUID) of + the dedicated inference deployment. + "name": "str", # Optional. Human-readable name of + the dedicated inference deployment. + "region_slug": "str", # Optional. Slug of the region + where the dedicated inference deployment is running (e.g. "atl1"). + "state": "str", # Optional. Current lifecycle state + of the dedicated inference deployment (e.g. "ACTIVE", + "PROVISIONING"). + "updated_at": "str" # Optional. RFC 3339 timestamp + indicating when the dedicated inference deployment was last updated. + } + ], + "architecture": "str", # Optional. Model architecture type + (free-form string from config.json). + "config_json": {}, # Optional. Raw config.json contents from the + model repository. + "context_length": 0, # Optional. Maximum context length supported by + the model. 
+ "cost_estimate_per_month": 0, # Optional. Estimated monthly cost in + dollars for hosting. + "created_at": "2020-02-20 00:00:00", # Optional. Timestamp when the + model was created. + "description": "str", # Optional. Description of the custom model. + "file_count": 0, # Optional. Number of files in the model. + "input_modalities": [ + "str" # Optional. Input modalities supported (e.g., text, + image). + ], + "license": "str", # Optional. License under which the model is + distributed. + "name": "str", # Optional. Name of the custom model. + "output_modalities": [ + "str" # Optional. Output modalities supported (e.g., text, + image). + ], + "parameters": "str", # Optional. Number of parameters in the model. + "source_ref": { + "access_type": "ACCESS_TYPE_UNSPECIFIED", # Optional. + Default value is "ACCESS_TYPE_UNSPECIFIED". Access level required for the + model repository. Known values are: "ACCESS_TYPE_UNSPECIFIED", + "ACCESS_TYPE_PUBLIC", "ACCESS_TYPE_PRIVATE", and "ACCESS_TYPE_GATED". + "bucket": "str", # Optional. Spaces bucket name. + "commit_sha": "str", # Optional. Git commit SHA of the model + version. + "hf_token": "str", # Optional. User-provided HuggingFace + token for gated/private models (not persisted in source_ref). + "prefix": "str", # Optional. Object prefix path in the + bucket. + "region": "str", # Optional. Spaces bucket region. + "repo_id": "str" # Optional. Huggingface repository + identifier. + }, + "source_type": "SOURCE_TYPE_UNSPECIFIED", # Optional. Default value + is "SOURCE_TYPE_UNSPECIFIED". Source from which the model was imported. Known + values are: "SOURCE_TYPE_UNSPECIFIED", "SOURCE_TYPE_HUGGINGFACE", + "SOURCE_TYPE_SPACES_BUCKET", "SOURCE_TYPE_SDK_UPLOAD", and + "SOURCE_TYPE_FINE_TUNING". + "status": "STATUS_UNSPECIFIED", # Optional. Default value is + "STATUS_UNSPECIFIED". Import and deployment status of the custom model. 
Known + values are: "STATUS_UNSPECIFIED", "STATUS_IMPORTING", "STATUS_READY", + "STATUS_FAILED", and "STATUS_DELETED". + "storage_region": "str", # Optional. Region of the Spaces bucket + where model files are stored. + "tags": { + "tags": [ + "str" # Optional. List of tag strings. + ] + }, + "team_id": "str", # Optional. Team that owns the model. + "total_size_bytes": "str", # Optional. Total size of model files in + bytes. + "updated_at": "2020-02-20 00:00:00", # Optional. Timestamp when the + model was last updated. + "uuid": "str" # Optional. Unique identifier for the custom model. + }, + "validation_steps": [ { - "expires_at": "2020-02-20 00:00:00", # Optional. The time - the url expires at. - "object_key": "str", # Optional. The unique object key to - store the file as. - "original_file_name": "str", # Optional. The original file - name. - "presigned_url": "str" # Optional. The actual presigned URL - the client can use to upload the file directly. + "error": "str", # Optional. Error message if validation + failed. + "name": "str", # Optional. Name of the validation step. + "passed": bool # Optional. Whether the validation step + passed. + } + ] + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[JSON] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + if body is not None: + _json = body + else: + _json = None + + _request = build_genai_import_custom_model_request( + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + 
deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace + def delete_custom_model(self, uuid: str, **kwargs: Any) -> JSON: + # pylint: disable=line-too-long + """Delete Custom Model. + + To delete a custom model, send a DELETE request to ``/v2/genai/custom_models/{uuid}``. + + :param uuid: UUID of the custom model to delete. Required. + :type uuid: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "error": "str", # Optional. Error message if deletion failed. + "status": "DELETE_CUSTOM_MODEL_STATUS_UNSPECIFIED" # Optional. Default value + is "DELETE_CUSTOM_MODEL_STATUS_UNSPECIFIED". Status of delete operation. Known + values are: "DELETE_CUSTOM_MODEL_STATUS_UNSPECIFIED", + "DELETE_CUSTOM_MODEL_STATUS_SUCCESS", and "DELETE_CUSTOM_MODEL_STATUS_FAIL". + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. 
Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_genai_delete_custom_model_request( + uuid=uuid, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + 
response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @overload + def update_custom_model_metadata( + self, + uuid: str, + body: Optional[JSON] = None, + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """Update Custom Model Metadata. + + To update custom model metadata, send a PATCH request to + ``/v2/gen-ai/custom_models/{uuid}/metadata``. + + :param uuid: UUID of the custom model to update. Required. + :type uuid: str + :param body: Default value is None. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "description": "str", # Optional. + "name": "str", # Optional. + "tags": { + "tags": [ + "str" # Optional. List of tag strings. + ] + }, + "uuid": "str" # Optional. UUID of the custom model to update. + } + + # response body for status code(s): 200 + response == { + "model": { + "active_deployments": [ + { + "created_at": "str", # Optional. RFC 3339 timestamp + indicating when the dedicated inference deployment was created. + "endpoints": { + "private_endpoint_fqdn": "str", # Optional. + Private FQDN for the deployment. + "public_endpoint_fqdn": "str" # Optional. + Public FQDN for the deployment. + }, + "id": "str", # Optional. 
Unique identifier (UUID) of + the dedicated inference deployment. + "name": "str", # Optional. Human-readable name of + the dedicated inference deployment. + "region_slug": "str", # Optional. Slug of the region + where the dedicated inference deployment is running (e.g. "atl1"). + "state": "str", # Optional. Current lifecycle state + of the dedicated inference deployment (e.g. "ACTIVE", + "PROVISIONING"). + "updated_at": "str" # Optional. RFC 3339 timestamp + indicating when the dedicated inference deployment was last updated. + } + ], + "architecture": "str", # Optional. Model architecture type + (free-form string from config.json). + "config_json": {}, # Optional. Raw config.json contents from the + model repository. + "context_length": 0, # Optional. Maximum context length supported by + the model. + "cost_estimate_per_month": 0, # Optional. Estimated monthly cost in + dollars for hosting. + "created_at": "2020-02-20 00:00:00", # Optional. Timestamp when the + model was created. + "description": "str", # Optional. Description of the custom model. + "file_count": 0, # Optional. Number of files in the model. + "input_modalities": [ + "str" # Optional. Input modalities supported (e.g., text, + image). + ], + "license": "str", # Optional. License under which the model is + distributed. + "name": "str", # Optional. Name of the custom model. + "output_modalities": [ + "str" # Optional. Output modalities supported (e.g., text, + image). + ], + "parameters": "str", # Optional. Number of parameters in the model. + "source_ref": { + "access_type": "ACCESS_TYPE_UNSPECIFIED", # Optional. + Default value is "ACCESS_TYPE_UNSPECIFIED". Access level required for the + model repository. Known values are: "ACCESS_TYPE_UNSPECIFIED", + "ACCESS_TYPE_PUBLIC", "ACCESS_TYPE_PRIVATE", and "ACCESS_TYPE_GATED". + "bucket": "str", # Optional. Spaces bucket name. + "commit_sha": "str", # Optional. Git commit SHA of the model + version. + "hf_token": "str", # Optional. 
User-provided HuggingFace + token for gated/private models (not persisted in source_ref). + "prefix": "str", # Optional. Object prefix path in the + bucket. + "region": "str", # Optional. Spaces bucket region. + "repo_id": "str" # Optional. Huggingface repository + identifier. + }, + "source_type": "SOURCE_TYPE_UNSPECIFIED", # Optional. Default value + is "SOURCE_TYPE_UNSPECIFIED". Source from which the model was imported. Known + values are: "SOURCE_TYPE_UNSPECIFIED", "SOURCE_TYPE_HUGGINGFACE", + "SOURCE_TYPE_SPACES_BUCKET", "SOURCE_TYPE_SDK_UPLOAD", and + "SOURCE_TYPE_FINE_TUNING". + "status": "STATUS_UNSPECIFIED", # Optional. Default value is + "STATUS_UNSPECIFIED". Import and deployment status of the custom model. Known + values are: "STATUS_UNSPECIFIED", "STATUS_IMPORTING", "STATUS_READY", + "STATUS_FAILED", and "STATUS_DELETED". + "storage_region": "str", # Optional. Region of the Spaces bucket + where model files are stored. + "tags": { + "tags": [ + "str" # Optional. List of tag strings. + ] + }, + "team_id": "str", # Optional. Team that owns the model. + "total_size_bytes": "str", # Optional. Total size of model files in + bytes. + "updated_at": "2020-02-20 00:00:00", # Optional. Timestamp when the + model was last updated. + "uuid": "str" # Optional. Unique identifier for the custom model. + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + + @overload + def update_custom_model_metadata( + self, + uuid: str, + body: Optional[IO[bytes]] = None, + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """Update Custom Model Metadata. + + To update custom model metadata, send a PATCH request to + ``/v2/gen-ai/custom_models/{uuid}/metadata``. + + :param uuid: UUID of the custom model to update. Required. + :type uuid: str + :param body: Default value is None. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "model": { + "active_deployments": [ + { + "created_at": "str", # Optional. RFC 3339 timestamp + indicating when the dedicated inference deployment was created. + "endpoints": { + "private_endpoint_fqdn": "str", # Optional. + Private FQDN for the deployment. + "public_endpoint_fqdn": "str" # Optional. + Public FQDN for the deployment. + }, + "id": "str", # Optional. Unique identifier (UUID) of + the dedicated inference deployment. + "name": "str", # Optional. Human-readable name of + the dedicated inference deployment. + "region_slug": "str", # Optional. Slug of the region + where the dedicated inference deployment is running (e.g. "atl1"). + "state": "str", # Optional. Current lifecycle state + of the dedicated inference deployment (e.g. "ACTIVE", + "PROVISIONING"). + "updated_at": "str" # Optional. RFC 3339 timestamp + indicating when the dedicated inference deployment was last updated. + } + ], + "architecture": "str", # Optional. Model architecture type + (free-form string from config.json). + "config_json": {}, # Optional. Raw config.json contents from the + model repository. 
+ "context_length": 0, # Optional. Maximum context length supported by + the model. + "cost_estimate_per_month": 0, # Optional. Estimated monthly cost in + dollars for hosting. + "created_at": "2020-02-20 00:00:00", # Optional. Timestamp when the + model was created. + "description": "str", # Optional. Description of the custom model. + "file_count": 0, # Optional. Number of files in the model. + "input_modalities": [ + "str" # Optional. Input modalities supported (e.g., text, + image). + ], + "license": "str", # Optional. License under which the model is + distributed. + "name": "str", # Optional. Name of the custom model. + "output_modalities": [ + "str" # Optional. Output modalities supported (e.g., text, + image). + ], + "parameters": "str", # Optional. Number of parameters in the model. + "source_ref": { + "access_type": "ACCESS_TYPE_UNSPECIFIED", # Optional. + Default value is "ACCESS_TYPE_UNSPECIFIED". Access level required for the + model repository. Known values are: "ACCESS_TYPE_UNSPECIFIED", + "ACCESS_TYPE_PUBLIC", "ACCESS_TYPE_PRIVATE", and "ACCESS_TYPE_GATED". + "bucket": "str", # Optional. Spaces bucket name. + "commit_sha": "str", # Optional. Git commit SHA of the model + version. + "hf_token": "str", # Optional. User-provided HuggingFace + token for gated/private models (not persisted in source_ref). + "prefix": "str", # Optional. Object prefix path in the + bucket. + "region": "str", # Optional. Spaces bucket region. + "repo_id": "str" # Optional. Huggingface repository + identifier. + }, + "source_type": "SOURCE_TYPE_UNSPECIFIED", # Optional. Default value + is "SOURCE_TYPE_UNSPECIFIED". Source from which the model was imported. Known + values are: "SOURCE_TYPE_UNSPECIFIED", "SOURCE_TYPE_HUGGINGFACE", + "SOURCE_TYPE_SPACES_BUCKET", "SOURCE_TYPE_SDK_UPLOAD", and + "SOURCE_TYPE_FINE_TUNING". + "status": "STATUS_UNSPECIFIED", # Optional. Default value is + "STATUS_UNSPECIFIED". Import and deployment status of the custom model. 
Known + values are: "STATUS_UNSPECIFIED", "STATUS_IMPORTING", "STATUS_READY", + "STATUS_FAILED", and "STATUS_DELETED". + "storage_region": "str", # Optional. Region of the Spaces bucket + where model files are stored. + "tags": { + "tags": [ + "str" # Optional. List of tag strings. + ] + }, + "team_id": "str", # Optional. Team that owns the model. + "total_size_bytes": "str", # Optional. Total size of model files in + bytes. + "updated_at": "2020-02-20 00:00:00", # Optional. Timestamp when the + model was last updated. + "uuid": "str" # Optional. Unique identifier for the custom model. + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace + def update_custom_model_metadata( + self, uuid: str, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Update Custom Model Metadata. + + To update custom model metadata, send a PATCH request to + ``/v2/gen-ai/custom_models/{uuid}/metadata``. + + :param uuid: UUID of the custom model to update. Required. + :type uuid: str + :param body: Is either a JSON type or a IO[bytes] type. Default value is None. + :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "description": "str", # Optional. + "name": "str", # Optional. 
+ "tags": { + "tags": [ + "str" # Optional. List of tag strings. + ] + }, + "uuid": "str" # Optional. UUID of the custom model to update. + } + + # response body for status code(s): 200 + response == { + "model": { + "active_deployments": [ + { + "created_at": "str", # Optional. RFC 3339 timestamp + indicating when the dedicated inference deployment was created. + "endpoints": { + "private_endpoint_fqdn": "str", # Optional. + Private FQDN for the deployment. + "public_endpoint_fqdn": "str" # Optional. + Public FQDN for the deployment. + }, + "id": "str", # Optional. Unique identifier (UUID) of + the dedicated inference deployment. + "name": "str", # Optional. Human-readable name of + the dedicated inference deployment. + "region_slug": "str", # Optional. Slug of the region + where the dedicated inference deployment is running (e.g. "atl1"). + "state": "str", # Optional. Current lifecycle state + of the dedicated inference deployment (e.g. "ACTIVE", + "PROVISIONING"). + "updated_at": "str" # Optional. RFC 3339 timestamp + indicating when the dedicated inference deployment was last updated. + } + ], + "architecture": "str", # Optional. Model architecture type + (free-form string from config.json). + "config_json": {}, # Optional. Raw config.json contents from the + model repository. + "context_length": 0, # Optional. Maximum context length supported by + the model. + "cost_estimate_per_month": 0, # Optional. Estimated monthly cost in + dollars for hosting. + "created_at": "2020-02-20 00:00:00", # Optional. Timestamp when the + model was created. + "description": "str", # Optional. Description of the custom model. + "file_count": 0, # Optional. Number of files in the model. + "input_modalities": [ + "str" # Optional. Input modalities supported (e.g., text, + image). + ], + "license": "str", # Optional. License under which the model is + distributed. + "name": "str", # Optional. Name of the custom model. + "output_modalities": [ + "str" # Optional. 
Output modalities supported (e.g., text, + image). + ], + "parameters": "str", # Optional. Number of parameters in the model. + "source_ref": { + "access_type": "ACCESS_TYPE_UNSPECIFIED", # Optional. + Default value is "ACCESS_TYPE_UNSPECIFIED". Access level required for the + model repository. Known values are: "ACCESS_TYPE_UNSPECIFIED", + "ACCESS_TYPE_PUBLIC", "ACCESS_TYPE_PRIVATE", and "ACCESS_TYPE_GATED". + "bucket": "str", # Optional. Spaces bucket name. + "commit_sha": "str", # Optional. Git commit SHA of the model + version. + "hf_token": "str", # Optional. User-provided HuggingFace + token for gated/private models (not persisted in source_ref). + "prefix": "str", # Optional. Object prefix path in the + bucket. + "region": "str", # Optional. Spaces bucket region. + "repo_id": "str" # Optional. Huggingface repository + identifier. + }, + "source_type": "SOURCE_TYPE_UNSPECIFIED", # Optional. Default value + is "SOURCE_TYPE_UNSPECIFIED". Source from which the model was imported. Known + values are: "SOURCE_TYPE_UNSPECIFIED", "SOURCE_TYPE_HUGGINGFACE", + "SOURCE_TYPE_SPACES_BUCKET", "SOURCE_TYPE_SDK_UPLOAD", and + "SOURCE_TYPE_FINE_TUNING". + "status": "STATUS_UNSPECIFIED", # Optional. Default value is + "STATUS_UNSPECIFIED". Import and deployment status of the custom model. Known + values are: "STATUS_UNSPECIFIED", "STATUS_IMPORTING", "STATUS_READY", + "STATUS_FAILED", and "STATUS_DELETED". + "storage_region": "str", # Optional. Region of the Spaces bucket + where model files are stored. + "tags": { + "tags": [ + "str" # Optional. List of tag strings. + ] + }, + "team_id": "str", # Optional. Team that owns the model. + "total_size_bytes": "str", # Optional. Total size of model files in + bytes. + "updated_at": "2020-02-20 00:00:00", # Optional. Timestamp when the + model was last updated. + "uuid": "str" # Optional. Unique identifier for the custom model. 
+ } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[JSON] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + if body is not None: + _json = body + else: + _json = None + + _request = build_genai_update_custom_model_metadata_request( + uuid=uuid, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + response.read() # Load the body in 
memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @overload + def create_evaluation_dataset( + self, + body: Optional[JSON] = None, + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """Create Evaluation Dataset. + + To create an evaluation dataset, send a POST request to ``/v2/gen-ai/evaluation_datasets``. + + :param body: Default value is None. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "dataset_type": "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and "EVALUATION_DATASET_TYPE_MODEL". + "file_upload_dataset": { + "original_file_name": "str", # Optional. The original file name. + "size_in_bytes": "str", # Optional. The size of the file in bytes. + "stored_object_key": "str" # Optional. The object key the file was + stored as. + }, + "name": "str" # Optional. The name of the agent evaluation dataset. + } + + # response body for status code(s): 200 + response == { + "evaluation_dataset_uuid": "str" # Optional. Evaluation dataset uuid. + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + def create_evaluation_dataset( + self, + body: Optional[IO[bytes]] = None, + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """Create Evaluation Dataset. + + To create an evaluation dataset, send a POST request to ``/v2/gen-ai/evaluation_datasets``. + + :param body: Default value is None. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". 
+ :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "evaluation_dataset_uuid": "str" # Optional. Evaluation dataset uuid. + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace + def create_evaluation_dataset( + self, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Create Evaluation Dataset. + + To create an evaluation dataset, send a POST request to ``/v2/gen-ai/evaluation_datasets``. + + :param body: Is either a JSON type or a IO[bytes] type. Default value is None. + :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "dataset_type": "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and "EVALUATION_DATASET_TYPE_MODEL". + "file_upload_dataset": { + "original_file_name": "str", # Optional. The original file name. + "size_in_bytes": "str", # Optional. The size of the file in bytes. + "stored_object_key": "str" # Optional. 
The object key the file was + stored as. + }, + "name": "str" # Optional. The name of the agent evaluation dataset. + } + + # response body for status code(s): 200 + response == { + "evaluation_dataset_uuid": "str" # Optional. Evaluation dataset uuid. + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[JSON] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + if body is not None: + _json = body + else: + _json = None + + _request = build_genai_create_evaluation_dataset_request( + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + 
self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @overload + def create_evaluation_dataset_file_upload_presigned_urls( # pylint: disable=name-too-long + self, + body: Optional[JSON] = None, + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """Create Presigned URLs for Evaluation Dataset File Upload. 
+ + To create presigned URLs for evaluation dataset file upload, send a POST request to + ``/v2/gen-ai/evaluation_datasets/file_upload_presigned_urls``. + + :param body: Default value is None. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "files": [ + { + "file_name": "str", # Optional. Local filename. + "file_size": "str" # Optional. The size of the file in + bytes. + } + ] + } + + # response body for status code(s): 200 + response == { + "request_id": "str", # Optional. The ID generated for the request for + Presigned URLs. + "uploads": [ + { + "expires_at": "2020-02-20 00:00:00", # Optional. The time + the url expires at. + "object_key": "str", # Optional. The unique object key to + store the file as. + "original_file_name": "str", # Optional. The original file + name. + "presigned_url": "str" # Optional. The actual presigned URL + the client can use to upload the file directly. + } + ] + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + + @overload + def create_evaluation_dataset_file_upload_presigned_urls( # pylint: disable=name-too-long + self, + body: Optional[IO[bytes]] = None, + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """Create Presigned URLs for Evaluation Dataset File Upload. + + To create presigned URLs for evaluation dataset file upload, send a POST request to + ``/v2/gen-ai/evaluation_datasets/file_upload_presigned_urls``. + + :param body: Default value is None. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "request_id": "str", # Optional. The ID generated for the request for + Presigned URLs. + "uploads": [ + { + "expires_at": "2020-02-20 00:00:00", # Optional. The time + the url expires at. + "object_key": "str", # Optional. The unique object key to + store the file as. + "original_file_name": "str", # Optional. The original file + name. + "presigned_url": "str" # Optional. The actual presigned URL + the client can use to upload the file directly. + } + ] + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + + @distributed_trace + def create_evaluation_dataset_file_upload_presigned_urls( # pylint: disable=name-too-long + self, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Create Presigned URLs for Evaluation Dataset File Upload. + + To create presigned URLs for evaluation dataset file upload, send a POST request to + ``/v2/gen-ai/evaluation_datasets/file_upload_presigned_urls``. + + :param body: Is either a JSON type or a IO[bytes] type. Default value is None. + :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "files": [ + { + "file_name": "str", # Optional. Local filename. + "file_size": "str" # Optional. The size of the file in + bytes. + } + ] + } + + # response body for status code(s): 200 + response == { + "request_id": "str", # Optional. The ID generated for the request for + Presigned URLs. + "uploads": [ + { + "expires_at": "2020-02-20 00:00:00", # Optional. The time + the url expires at. + "object_key": "str", # Optional. The unique object key to + store the file as. + "original_file_name": "str", # Optional. The original file + name. + "presigned_url": "str" # Optional. The actual presigned URL + the client can use to upload the file directly. } ] } @@ -243907,6 +255364,121 @@ def create_evaluation_dataset_file_upload_presigned_urls( # pylint: disable=nam return cast(JSON, deserialized) # type: ignore + @distributed_trace + def get_evaluation_dataset_download_url( + self, dataset_uuid: str, **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Get Download URL for Evaluation Dataset. + + To get a presigned download URL for an evaluation dataset, send a GET request to + ``/v2/genai/evaluation_datasets/{dataset_uuid}/download_url``. 
+ + :param dataset_uuid: UUID of the evaluation dataset. Required. + :type dataset_uuid: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "download_url": "str", # Optional. The presigned URL to download the dataset + file. + "expires_at": "2020-02-20 00:00:00" # Optional. The time the URL expires at. + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_genai_get_evaluation_dataset_download_url_request( + dataset_uuid=dataset_uuid, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + 
response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + @distributed_trace def list_evaluation_metrics(self, **kwargs: Any) -> JSON: # pylint: disable=line-too-long @@ -243931,6 +255503,13 @@ def list_evaluation_metrics(self, **kwargs: Any) -> JSON: "METRIC_CATEGORY_USER_OUTCOMES", "METRIC_CATEGORY_SAFETY_AND_SECURITY", "METRIC_CATEGORY_CONTEXT_QUALITY", and "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. + "evaluation_scope": "EVALUATION_SCOPE_UNSPECIFIED", # + Optional. Default value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation or model + evaluation. For backwards compatibility, UNSPECIFIED defaults to agent + metrics only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. "is_metric_goal": bool, # Optional. @@ -243938,8 +255517,9 @@ def list_evaluation_metrics(self, **kwargs: Any) -> JSON: "metric_rank": 0, # Optional. "metric_type": "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: - "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", and - "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", and + "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. "metric_value_type": "METRIC_VALUE_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_VALUE_TYPE_UNSPECIFIED". Known values @@ -244618,11 +256198,173 @@ def get_evaluation_run_results( Optional. 
The value of the metric as a string. } ], + "spans": [ + { + "agent": { + "agent_type": + "AGENT_TYPE_UNSPECIFIED", # Optional. Default value + is "AGENT_TYPE_UNSPECIFIED". Agent span. Known values + are: "AGENT_TYPE_UNSPECIFIED", "AGENT_TYPE_DEFAULT", + "AGENT_TYPE_PLANNER", "AGENT_TYPE_REACT", + "AGENT_TYPE_REFLECTION", "AGENT_TYPE_ROUTER", + "AGENT_TYPE_CLASSIFIER", "AGENT_TYPE_SUPERVISOR", and + "AGENT_TYPE_JUDGE". + "common": { + "created_at": + "2020-02-20 00:00:00", # Optional. Common + optional fields shared by all span types. + "duration_ns": "str", # Optional. Common + optional fields shared by all span types. + "metadata": { + "str": "str" # Optional. Arbitrary + structured metadata. + }, + "status_code": 0, # Optional. Common optional + fields shared by all span types. + "tags": [ + "str" + # Optional. Free-form tags for + filtering/grouping. + ] + }, + "redacted_input": + "str", # Optional. Child spans - must contain + between 1 and 999 spans Allowed types: agent, llm, + tool, retriever (not workflow). + "redacted_output": + "str", # Optional. Child spans - must contain + between 1 and 999 spans Allowed types: agent, llm, + tool, retriever (not workflow). + "spans": [ + ... + ] + }, + "created_at": "2020-02-20 + 00:00:00", # Optional. When the span was created. + "input": {}, # Optional. + Input data for the span (flexible structure - can be + messages array, string, etc.). + "llm": { + "common": { + "created_at": + "2020-02-20 00:00:00", # Optional. Common + optional fields shared by all span types. + "duration_ns": "str", # Optional. Common + optional fields shared by all span types. + "metadata": { + "str": "str" # Optional. Arbitrary + structured metadata. + }, + "status_code": 0, # Optional. Common optional + fields shared by all span types. + "tags": [ + "str" + # Optional. Free-form tags for + filtering/grouping. + ] + }, + "model": "str", # + Optional. LLM span. + "num_input_tokens": + 0, # Optional. LLM span. 
+ "num_output_tokens": + 0, # Optional. LLM span. + "temperature": 0.0, + # Optional. LLM span. + "time_to_first_token_ns": "str", # Optional. LLM + span. + "tools": [ + {} # + Optional. Tool definitions passed to the model. + ], + "total_tokens": 0 # + Optional. LLM span. + }, + "name": "str", # Optional. + Name/identifier for the span. + "output": {}, # Optional. + Output data from the span (flexible structure - can be + message, string, etc.). + "retriever": { + "common": { + "created_at": + "2020-02-20 00:00:00", # Optional. Common + optional fields shared by all span types. + "duration_ns": "str", # Optional. Common + optional fields shared by all span types. + "metadata": { + "str": "str" # Optional. Arbitrary + structured metadata. + }, + "status_code": 0, # Optional. Common optional + fields shared by all span types. + "tags": [ + "str" + # Optional. Free-form tags for + filtering/grouping. + ] + } + }, + "tool": { + "common": { + "created_at": + "2020-02-20 00:00:00", # Optional. Common + optional fields shared by all span types. + "duration_ns": "str", # Optional. Common + optional fields shared by all span types. + "metadata": { + "str": "str" # Optional. Arbitrary + structured metadata. + }, + "status_code": 0, # Optional. Common optional + fields shared by all span types. + "tags": [ + "str" + # Optional. Free-form tags for + filtering/grouping. + ] + }, + "tool_call_id": "str" + # Optional. Tool span. + }, + "type": + "TRACE_SPAN_TYPE_UNKNOWN", # Optional. Default value is + "TRACE_SPAN_TYPE_UNKNOWN". Types of spans in a trace. + Known values are: "TRACE_SPAN_TYPE_UNKNOWN", + "TRACE_SPAN_TYPE_LLM", "TRACE_SPAN_TYPE_RETRIEVER", + "TRACE_SPAN_TYPE_TOOL", "TRACE_SPAN_TYPE_AGENT", and + "TRACE_SPAN_TYPE_WORKFLOW". + "workflow": { + "common": { + "created_at": + "2020-02-20 00:00:00", # Optional. Common + optional fields shared by all span types. + "duration_ns": "str", # Optional. Common + optional fields shared by all span types. 
+ "metadata": { + "str": "str" # Optional. Arbitrary + structured metadata. + }, + "status_code": 0, # Optional. Common optional + fields shared by all span types. + "tags": [ + "str" + # Optional. Free-form tags for + filtering/grouping. + ] + }, + "spans": [ + ... + ] + } + } + ], "type": "TRACE_SPAN_TYPE_UNKNOWN" # Optional. Default value is "TRACE_SPAN_TYPE_UNKNOWN". Types of spans in a trace. Known values are: "TRACE_SPAN_TYPE_UNKNOWN", - "TRACE_SPAN_TYPE_LLM", "TRACE_SPAN_TYPE_RETRIEVER", and - "TRACE_SPAN_TYPE_TOOL". + "TRACE_SPAN_TYPE_LLM", "TRACE_SPAN_TYPE_RETRIEVER", + "TRACE_SPAN_TYPE_TOOL", "TRACE_SPAN_TYPE_AGENT", and + "TRACE_SPAN_TYPE_WORKFLOW". } ], "ground_truth": "str", # Optional. The ground truth for the @@ -244833,22 +256575,188 @@ def get_evaluation_run_prompt_results( The value of the metric as a string. } ], - "type": "TRACE_SPAN_TYPE_UNKNOWN" # Optional. - Default value is "TRACE_SPAN_TYPE_UNKNOWN". Types of spans in a - trace. Known values are: "TRACE_SPAN_TYPE_UNKNOWN", - "TRACE_SPAN_TYPE_LLM", "TRACE_SPAN_TYPE_RETRIEVER", and - "TRACE_SPAN_TYPE_TOOL". - } - ], - "ground_truth": "str", # Optional. The ground truth for the prompt. - "input": "str", # Optional. - "input_tokens": "str", # Optional. The number of input tokens used - in the prompt. - "output": "str", # Optional. - "output_tokens": "str", # Optional. The number of output tokens used - in the prompt. - "prompt_chunks": [ - { + "spans": [ + { + "agent": { + "agent_type": + "AGENT_TYPE_UNSPECIFIED", # Optional. Default value is + "AGENT_TYPE_UNSPECIFIED". Agent span. Known values are: + "AGENT_TYPE_UNSPECIFIED", "AGENT_TYPE_DEFAULT", + "AGENT_TYPE_PLANNER", "AGENT_TYPE_REACT", + "AGENT_TYPE_REFLECTION", "AGENT_TYPE_ROUTER", + "AGENT_TYPE_CLASSIFIER", "AGENT_TYPE_SUPERVISOR", and + "AGENT_TYPE_JUDGE". + "common": { + "created_at": + "2020-02-20 00:00:00", # Optional. Common optional + fields shared by all span types. + "duration_ns": "str", + # Optional. 
Common optional fields shared by all span + types. + "metadata": { + "str": "str" + # Optional. Arbitrary structured metadata. + }, + "status_code": 0, # + Optional. Common optional fields shared by all span + types. + "tags": [ + "str" # + Optional. Free-form tags for filtering/grouping. + ] + }, + "redacted_input": "str", # + Optional. Child spans - must contain between 1 and 999 + spans Allowed types: agent, llm, tool, retriever (not + workflow). + "redacted_output": "str", # + Optional. Child spans - must contain between 1 and 999 + spans Allowed types: agent, llm, tool, retriever (not + workflow). + "spans": [ + ... + ] + }, + "created_at": "2020-02-20 00:00:00", + # Optional. When the span was created. + "input": {}, # Optional. Input data + for the span (flexible structure - can be messages array, + string, etc.). + "llm": { + "common": { + "created_at": + "2020-02-20 00:00:00", # Optional. Common optional + fields shared by all span types. + "duration_ns": "str", + # Optional. Common optional fields shared by all span + types. + "metadata": { + "str": "str" + # Optional. Arbitrary structured metadata. + }, + "status_code": 0, # + Optional. Common optional fields shared by all span + types. + "tags": [ + "str" # + Optional. Free-form tags for filtering/grouping. + ] + }, + "model": "str", # Optional. + LLM span. + "num_input_tokens": 0, # + Optional. LLM span. + "num_output_tokens": 0, # + Optional. LLM span. + "temperature": 0.0, # + Optional. LLM span. + "time_to_first_token_ns": + "str", # Optional. LLM span. + "tools": [ + {} # Optional. Tool + definitions passed to the model. + ], + "total_tokens": 0 # + Optional. LLM span. + }, + "name": "str", # Optional. + Name/identifier for the span. + "output": {}, # Optional. Output + data from the span (flexible structure - can be message, + string, etc.). + "retriever": { + "common": { + "created_at": + "2020-02-20 00:00:00", # Optional. Common optional + fields shared by all span types. 
+ "duration_ns": "str", + # Optional. Common optional fields shared by all span + types. + "metadata": { + "str": "str" + # Optional. Arbitrary structured metadata. + }, + "status_code": 0, # + Optional. Common optional fields shared by all span + types. + "tags": [ + "str" # + Optional. Free-form tags for filtering/grouping. + ] + } + }, + "tool": { + "common": { + "created_at": + "2020-02-20 00:00:00", # Optional. Common optional + fields shared by all span types. + "duration_ns": "str", + # Optional. Common optional fields shared by all span + types. + "metadata": { + "str": "str" + # Optional. Arbitrary structured metadata. + }, + "status_code": 0, # + Optional. Common optional fields shared by all span + types. + "tags": [ + "str" # + Optional. Free-form tags for filtering/grouping. + ] + }, + "tool_call_id": "str" # + Optional. Tool span. + }, + "type": "TRACE_SPAN_TYPE_UNKNOWN", # + Optional. Default value is "TRACE_SPAN_TYPE_UNKNOWN". Types + of spans in a trace. Known values are: + "TRACE_SPAN_TYPE_UNKNOWN", "TRACE_SPAN_TYPE_LLM", + "TRACE_SPAN_TYPE_RETRIEVER", "TRACE_SPAN_TYPE_TOOL", + "TRACE_SPAN_TYPE_AGENT", and "TRACE_SPAN_TYPE_WORKFLOW". + "workflow": { + "common": { + "created_at": + "2020-02-20 00:00:00", # Optional. Common optional + fields shared by all span types. + "duration_ns": "str", + # Optional. Common optional fields shared by all span + types. + "metadata": { + "str": "str" + # Optional. Arbitrary structured metadata. + }, + "status_code": 0, # + Optional. Common optional fields shared by all span + types. + "tags": [ + "str" # + Optional. Free-form tags for filtering/grouping. + ] + }, + "spans": [ + ... + ] + } + } + ], + "type": "TRACE_SPAN_TYPE_UNKNOWN" # Optional. + Default value is "TRACE_SPAN_TYPE_UNKNOWN". Types of spans in a + trace. Known values are: "TRACE_SPAN_TYPE_UNKNOWN", + "TRACE_SPAN_TYPE_LLM", "TRACE_SPAN_TYPE_RETRIEVER", + "TRACE_SPAN_TYPE_TOOL", "TRACE_SPAN_TYPE_AGENT", and + "TRACE_SPAN_TYPE_WORKFLOW". 
+ } + ], + "ground_truth": "str", # Optional. The ground truth for the prompt. + "input": "str", # Optional. + "input_tokens": "str", # Optional. The number of input tokens used + in the prompt. + "output": "str", # Optional. + "output_tokens": "str", # Optional. The number of output tokens used + in the prompt. + "prompt_chunks": [ + { "chunk_usage_pct": 0.0, # Optional. The usage percentage of the chunk. "chunk_used": bool, # Optional. Indicates if the @@ -245008,6 +256916,11 @@ def list_evaluation_test_cases(self, **kwargs: Any) -> JSON: Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": "EVALUATION_DATASET_TYPE_UNKNOWN", # + Optional. Default value is "EVALUATION_DATASET_TYPE_UNKNOWN". Known + values are: "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The size of the @@ -245041,6 +256954,14 @@ def list_evaluation_test_cases(self, **kwargs: Any) -> JSON: "description": "str", # Optional. Alternative way of authentication for internal usage only - should not be exposed to public api. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default value is + "EVALUATION_SCOPE_UNSPECIFIED". Scope that determines whether a + metric belongs to agent evaluation or model evaluation. For + backwards compatibility, UNSPECIFIED defaults to agent metrics + only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. "is_metric_goal": bool, # Optional. @@ -245055,7 +256976,8 @@ def list_evaluation_test_cases(self, **kwargs: Any) -> JSON: "metric_type": "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". 
Known values are: "METRIC_TYPE_UNSPECIFIED", - "METRIC_TYPE_GENERAL_QUALITY", and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_GENERAL_QUALITY", "METRIC_TYPE_RAG_AND_TOOL", + "METRIC_TYPE_MODEL_QUALITY", and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Alternative way of authentication for internal usage only - should not be exposed to public api. @@ -245684,6 +257606,11 @@ def get_evaluation_test_case( "created_at": "2020-02-20 00:00:00", # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": "EVALUATION_DATASET_TYPE_UNKNOWN", # + Optional. Default value is "EVALUATION_DATASET_TYPE_UNKNOWN". Known + values are: "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The size of the dataset uploaded file in bytes. @@ -245704,6 +257631,13 @@ def get_evaluation_test_case( "METRIC_CATEGORY_SAFETY_AND_SECURITY", "METRIC_CATEGORY_CONTEXT_QUALITY", and "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. + "evaluation_scope": "EVALUATION_SCOPE_UNSPECIFIED", + # Optional. Default value is "EVALUATION_SCOPE_UNSPECIFIED". Scope + that determines whether a metric belongs to agent evaluation or model + evaluation. For backwards compatibility, UNSPECIFIED defaults to + agent metrics only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. "is_metric_goal": bool, # Optional. @@ -245711,8 +257645,9 @@ def get_evaluation_test_case( "metric_rank": 0, # Optional. "metric_type": "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". 
Known values - are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", and - "METRIC_TYPE_RAG_AND_TOOL". + are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", and + "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. "metric_value_type": "METRIC_VALUE_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_VALUE_TYPE_UNSPECIFIED". Known @@ -247771,6 +259706,12 @@ def list_knowledge_bases( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. The knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether reranking is + enabled for retrieval. + "model": "str" # Optional. Reranker model internal + name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -247931,18 +259872,21 @@ def create_knowledge_base( data_source_details. "bucket_region": "str", # Optional. Deprecated, moved to data_source_details. - "chunking_algorithm": "CHUNKING_ALGORITHM_SECTION_BASED", # - Optional. Default value is "CHUNKING_ALGORITHM_SECTION_BASED". The - chunking algorithm to use for processing data sources. **Note: This - feature requires enabling the knowledgebase enhancements feature preview - flag.**. Known values are: "CHUNKING_ALGORITHM_UNKNOWN", - "CHUNKING_ALGORITHM_SECTION_BASED", "CHUNKING_ALGORITHM_HIERARCHICAL", - "CHUNKING_ALGORITHM_SEMANTIC", and "CHUNKING_ALGORITHM_FIXED_LENGTH". + "chunking_algorithm": "CHUNKING_ALGORITHM_UNKNOWN", # + Optional. Default value is "CHUNKING_ALGORITHM_UNKNOWN". Known values + are: "CHUNKING_ALGORITHM_UNKNOWN", "CHUNKING_ALGORITHM_SECTION_BASED", + "CHUNKING_ALGORITHM_HIERARCHICAL", "CHUNKING_ALGORITHM_SEMANTIC", and + "CHUNKING_ALGORITHM_FIXED_LENGTH". "chunking_options": { - "child_chunk_size": 0, # Optional. Hierarchical - options. - "max_chunk_size": 0, # Optional. Section_Based and - Fixed_Length options. + "child_chunk_size": 0, # Optional. 
Optional data + sources to attach at creation. Omit or use an empty list to create + the knowledge base without sources, then add sources (with chunking + strategy and sizes) using `Add a Data Source to a Knowledge Base + <#operation/create_knowledge_base_data_source>`_. When provided, see + `Organize Data Sources + `_ + for best practices. + "max_chunk_size": 0, # Optional. Common options. "parent_chunk_size": 0, # Optional. Hierarchical options. "semantic_threshold": 0.0 # Optional. Semantic @@ -247969,10 +259913,14 @@ def create_knowledge_base( you can obrain a refresh token by following the oauth2 flow. see /v2/gen-ai/oauth2/google/tokens for reference. }, - "item_path": "str", # Optional. The data sources to use for - this knowledge base. See `Organize Data Sources - `_ - for more information on data sources best practices. + "item_path": "str", # Optional. Optional data sources to + attach at creation. Omit or use an empty list to create the knowledge + base without sources, then add sources (with chunking strategy and sizes) + using `Add a Data Source to a Knowledge Base + <#operation/create_knowledge_base_data_source>`_. When provided, see + `Organize Data Sources + `_ + for best practices. "spaces_data_source": { "bucket_name": "str", # Optional. Spaces bucket name. @@ -248009,6 +259957,16 @@ def create_knowledge_base( knowledge base will belong to. "region": "str", # Optional. The datacenter region to deploy the knowledge base in. + "reranking_config": { + "enabled": bool, # Optional. Whether reranking is enabled for + retrieval. + "model": "str" # Optional. Reranker model internal name. + }, + "size": "OPEN_SEARCH_PLAN_SIZE_UNSPECIFIED", # Optional. Default value is + "OPEN_SEARCH_PLAN_SIZE_UNSPECIFIED". Known values are: + "OPEN_SEARCH_PLAN_SIZE_UNSPECIFIED", "OPEN_SEARCH_PLAN_SIZE_SMALL", + "OPEN_SEARCH_PLAN_SIZE_MEDIUM", "OPEN_SEARCH_PLAN_SIZE_LARGE", and + "OPEN_SEARCH_PLAN_SIZE_EXTRA_LARGE". "tags": [ "str" # Optional. 
Tags to organize your knowledge base. ], @@ -248107,6 +260065,11 @@ def create_knowledge_base( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledgebase Description. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether reranking is enabled + for retrieval. + "model": "str" # Optional. Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. ], @@ -248245,241 +260208,268 @@ def create_knowledge_base( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledgebase Description. "region": "str", # Optional. Region code. - "tags": [ - "str" # Optional. Tags to organize related resources. - ], - "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. - "user_id": "str", # Optional. Id of user that created the knowledge - base. - "uuid": "str" # Optional. Unique id for knowledge base. - } - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @distributed_trace - def create_knowledge_base( - self, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any - ) -> JSON: - # pylint: disable=line-too-long - """Create a Knowledge Base. - - To create a knowledge base, send a POST request to ``/v2/gen-ai/knowledge_bases``. - - :param body: Is either a JSON type or a IO[bytes] type. Default value is None. 
- :type body: JSON or IO[bytes] - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "database_id": "str", # Optional. Identifier of the DigitalOcean OpenSearch - database this knowledge base will use, optional. If not provided, we create a new - database for the knowledge base in the same region as the knowledge base. - "datasources": [ - { - "aws_data_source": { - "bucket_name": "str", # Optional. Spaces bucket - name. - "item_path": "str", # Optional. AWS S3 Data Source. - "key_id": "str", # Optional. The AWS Key ID. - "region": "str", # Optional. Region of bucket. - "secret_key": "str" # Optional. The AWS Secret Key. - }, - "bucket_name": "str", # Optional. Deprecated, moved to - data_source_details. - "bucket_region": "str", # Optional. Deprecated, moved to - data_source_details. - "chunking_algorithm": "CHUNKING_ALGORITHM_SECTION_BASED", # - Optional. Default value is "CHUNKING_ALGORITHM_SECTION_BASED". The - chunking algorithm to use for processing data sources. **Note: This - feature requires enabling the knowledgebase enhancements feature preview - flag.**. Known values are: "CHUNKING_ALGORITHM_UNKNOWN", - "CHUNKING_ALGORITHM_SECTION_BASED", "CHUNKING_ALGORITHM_HIERARCHICAL", - "CHUNKING_ALGORITHM_SEMANTIC", and "CHUNKING_ALGORITHM_FIXED_LENGTH". - "chunking_options": { - "child_chunk_size": 0, # Optional. Hierarchical - options. - "max_chunk_size": 0, # Optional. Section_Based and - Fixed_Length options. - "parent_chunk_size": 0, # Optional. Hierarchical - options. - "semantic_threshold": 0.0 # Optional. Semantic - options. - }, - "dropbox_data_source": { - "folder": "str", # Optional. Dropbox Data Source. - "refresh_token": "str" # Optional. Refresh token. - you can obrain a refresh token by following the oauth2 flow. see - /v2/gen-ai/oauth2/dropbox/tokens for reference. 
- }, - "file_upload_data_source": { - "original_file_name": "str", # Optional. The - original file name. - "size_in_bytes": "str", # Optional. The size of the - file in bytes. - "stored_object_key": "str" # Optional. The object - key the file was stored as. - }, - "google_drive_data_source": { - "folder_id": "str", # Optional. Google Drive Data - Source. - "refresh_token": "str" # Optional. Refresh token. - you can obrain a refresh token by following the oauth2 flow. see - /v2/gen-ai/oauth2/google/tokens for reference. - }, - "item_path": "str", # Optional. The data sources to use for - this knowledge base. See `Organize Data Sources - `_ - for more information on data sources best practices. - "spaces_data_source": { - "bucket_name": "str", # Optional. Spaces bucket - name. - "item_path": "str", # Optional. Spaces Bucket Data - Source. - "region": "str" # Optional. Region of bucket. - }, - "web_crawler_data_source": { - "base_url": "str", # Optional. The base url to - crawl. - "crawling_option": "UNKNOWN", # Optional. Default - value is "UNKNOWN". Options for specifying how URLs found on pages - should be handled. * UNKNOWN: Default unknown value * SCOPED: Only - include the base URL. * PATH: Crawl the base URL and linked pages - within the URL path. * DOMAIN: Crawl the base URL and linked pages - within the same domain. * SUBDOMAINS: Crawl the base URL and linked - pages for any subdomain. * SITEMAP: Crawl URLs discovered in the - sitemap. Known values are: "UNKNOWN", "SCOPED", "PATH", "DOMAIN", - "SUBDOMAINS", and "SITEMAP". - "embed_media": bool, # Optional. Whether to ingest - and index media (images, etc.) on web pages. - "exclude_tags": [ - "str" # Optional. Declaring which tags to - exclude in web pages while webcrawling. - ] - } - } - ], - "embedding_model_uuid": "str", # Optional. Identifier for the `embedding - model - `_. - "name": "str", # Optional. Name of the knowledge base. - "project_id": "str", # Optional. 
Identifier of the DigitalOcean project this - knowledge base will belong to. - "region": "str", # Optional. The datacenter region to deploy the knowledge - base in. - "tags": [ - "str" # Optional. Tags to organize your knowledge base. - ], - "vpc_uuid": "str" # Optional. The VPC to deploy the knowledge base database - in. - } - - # response body for status code(s): 200 - response == { - "knowledge_base": { - "added_to_agent_at": "2020-02-20 00:00:00", # Optional. Time when - the knowledge base was added to the agent. - "created_at": "2020-02-20 00:00:00", # Optional. Creation date / - time. - "database_id": "str", # Optional. Knowledgebase Description. - "embedding_model_uuid": "str", # Optional. Knowledgebase - Description. - "is_public": bool, # Optional. Whether the knowledge base is public - or not. - "last_indexing_job": { - "completed_datasources": 0, # Optional. Number of - datasources indexed completed. - "created_at": "2020-02-20 00:00:00", # Optional. Creation - date / time. - "data_source_jobs": [ - { - "completed_at": "2020-02-20 00:00:00", # - Optional. Timestamp when data source completed indexing. - "data_source_uuid": "str", # Optional. Uuid - of the indexed data source. - "error_details": "str", # Optional. A - detailed error description. - "error_msg": "str", # Optional. A string - code provinding a hint which part of the system experienced an - error. - "failed_item_count": "str", # Optional. - Total count of files that have failed. - "indexed_file_count": "str", # Optional. - Total count of files that have been indexed. - "indexed_item_count": "str", # Optional. - Total count of files that have been indexed. - "removed_item_count": "str", # Optional. - Total count of files that have been removed. - "skipped_item_count": "str", # Optional. - Total count of files that have been skipped. - "started_at": "2020-02-20 00:00:00", # - Optional. Timestamp when data source started indexing. - "status": "DATA_SOURCE_STATUS_UNKNOWN", # - Optional. 
Default value is "DATA_SOURCE_STATUS_UNKNOWN". Known - values are: "DATA_SOURCE_STATUS_UNKNOWN", - "DATA_SOURCE_STATUS_IN_PROGRESS", "DATA_SOURCE_STATUS_UPDATED", - "DATA_SOURCE_STATUS_PARTIALLY_UPDATED", - "DATA_SOURCE_STATUS_NOT_UPDATED", "DATA_SOURCE_STATUS_FAILED", - and "DATA_SOURCE_STATUS_CANCELLED". - "total_bytes": "str", # Optional. Total size - of files in data source in bytes. - "total_bytes_indexed": "str", # Optional. - Total size of files in data source in bytes that have been - indexed. - "total_file_count": "str" # Optional. Total - file count in the data source. - } - ], - "data_source_uuids": [ - "str" # Optional. IndexingJob description. - ], - "finished_at": "2020-02-20 00:00:00", # Optional. - IndexingJob description. - "is_report_available": bool, # Optional. Boolean value to - determine if the indexing job details are available. - "knowledge_base_uuid": "str", # Optional. Knowledge base id. - "phase": "BATCH_JOB_PHASE_UNKNOWN", # Optional. Default - value is "BATCH_JOB_PHASE_UNKNOWN". Known values are: - "BATCH_JOB_PHASE_UNKNOWN", "BATCH_JOB_PHASE_PENDING", - "BATCH_JOB_PHASE_RUNNING", "BATCH_JOB_PHASE_SUCCEEDED", - "BATCH_JOB_PHASE_FAILED", "BATCH_JOB_PHASE_ERROR", and - "BATCH_JOB_PHASE_CANCELLED". - "started_at": "2020-02-20 00:00:00", # Optional. IndexingJob - description. - "status": "INDEX_JOB_STATUS_UNKNOWN", # Optional. Default - value is "INDEX_JOB_STATUS_UNKNOWN". Known values are: - "INDEX_JOB_STATUS_UNKNOWN", "INDEX_JOB_STATUS_PARTIAL", - "INDEX_JOB_STATUS_IN_PROGRESS", "INDEX_JOB_STATUS_COMPLETED", - "INDEX_JOB_STATUS_FAILED", "INDEX_JOB_STATUS_NO_CHANGES", - "INDEX_JOB_STATUS_PENDING", and "INDEX_JOB_STATUS_CANCELLED". - "tokens": 0, # Optional. Number of tokens [This field is - deprecated]. - "total_datasources": 0, # Optional. Number of datasources - being indexed. - "total_tokens": "str", # Optional. Total Tokens Consumed By - the Indexing Job. - "updated_at": "2020-02-20 00:00:00", # Optional. Last - modified. 
- "uuid": "str" # Optional. Unique id. + "reranking_config": { + "enabled": bool, # Optional. Whether reranking is enabled + for retrieval. + "model": "str" # Optional. Reranker model internal name. + }, + "tags": [ + "str" # Optional. Tags to organize related resources. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. + "user_id": "str", # Optional. Id of user that created the knowledge + base. + "uuid": "str" # Optional. Unique id for knowledge base. + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace + def create_knowledge_base( + self, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Create a Knowledge Base. + + To create a knowledge base, send a POST request to ``/v2/gen-ai/knowledge_bases``. + + :param body: Is either a JSON type or a IO[bytes] type. Default value is None. + :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "database_id": "str", # Optional. Identifier of the DigitalOcean OpenSearch + database this knowledge base will use, optional. If not provided, we create a new + database for the knowledge base in the same region as the knowledge base. 
+ "datasources": [ + { + "aws_data_source": { + "bucket_name": "str", # Optional. Spaces bucket + name. + "item_path": "str", # Optional. AWS S3 Data Source. + "key_id": "str", # Optional. The AWS Key ID. + "region": "str", # Optional. Region of bucket. + "secret_key": "str" # Optional. The AWS Secret Key. + }, + "bucket_name": "str", # Optional. Deprecated, moved to + data_source_details. + "bucket_region": "str", # Optional. Deprecated, moved to + data_source_details. + "chunking_algorithm": "CHUNKING_ALGORITHM_UNKNOWN", # + Optional. Default value is "CHUNKING_ALGORITHM_UNKNOWN". Known values + are: "CHUNKING_ALGORITHM_UNKNOWN", "CHUNKING_ALGORITHM_SECTION_BASED", + "CHUNKING_ALGORITHM_HIERARCHICAL", "CHUNKING_ALGORITHM_SEMANTIC", and + "CHUNKING_ALGORITHM_FIXED_LENGTH". + "chunking_options": { + "child_chunk_size": 0, # Optional. Optional data + sources to attach at creation. Omit or use an empty list to create + the knowledge base without sources, then add sources (with chunking + strategy and sizes) using `Add a Data Source to a Knowledge Base + <#operation/create_knowledge_base_data_source>`_. When provided, see + `Organize Data Sources + `_ + for best practices. + "max_chunk_size": 0, # Optional. Common options. + "parent_chunk_size": 0, # Optional. Hierarchical + options. + "semantic_threshold": 0.0 # Optional. Semantic + options. + }, + "dropbox_data_source": { + "folder": "str", # Optional. Dropbox Data Source. + "refresh_token": "str" # Optional. Refresh token. + you can obrain a refresh token by following the oauth2 flow. see + /v2/gen-ai/oauth2/dropbox/tokens for reference. + }, + "file_upload_data_source": { + "original_file_name": "str", # Optional. The + original file name. + "size_in_bytes": "str", # Optional. The size of the + file in bytes. + "stored_object_key": "str" # Optional. The object + key the file was stored as. + }, + "google_drive_data_source": { + "folder_id": "str", # Optional. Google Drive Data + Source. 
+ "refresh_token": "str" # Optional. Refresh token. + you can obrain a refresh token by following the oauth2 flow. see + /v2/gen-ai/oauth2/google/tokens for reference. + }, + "item_path": "str", # Optional. Optional data sources to + attach at creation. Omit or use an empty list to create the knowledge + base without sources, then add sources (with chunking strategy and sizes) + using `Add a Data Source to a Knowledge Base + <#operation/create_knowledge_base_data_source>`_. When provided, see + `Organize Data Sources + `_ + for best practices. + "spaces_data_source": { + "bucket_name": "str", # Optional. Spaces bucket + name. + "item_path": "str", # Optional. Spaces Bucket Data + Source. + "region": "str" # Optional. Region of bucket. + }, + "web_crawler_data_source": { + "base_url": "str", # Optional. The base url to + crawl. + "crawling_option": "UNKNOWN", # Optional. Default + value is "UNKNOWN". Options for specifying how URLs found on pages + should be handled. * UNKNOWN: Default unknown value * SCOPED: Only + include the base URL. * PATH: Crawl the base URL and linked pages + within the URL path. * DOMAIN: Crawl the base URL and linked pages + within the same domain. * SUBDOMAINS: Crawl the base URL and linked + pages for any subdomain. * SITEMAP: Crawl URLs discovered in the + sitemap. Known values are: "UNKNOWN", "SCOPED", "PATH", "DOMAIN", + "SUBDOMAINS", and "SITEMAP". + "embed_media": bool, # Optional. Whether to ingest + and index media (images, etc.) on web pages. + "exclude_tags": [ + "str" # Optional. Declaring which tags to + exclude in web pages while webcrawling. + ] + } + } + ], + "embedding_model_uuid": "str", # Optional. Identifier for the `embedding + model + `_. + "name": "str", # Optional. Name of the knowledge base. + "project_id": "str", # Optional. Identifier of the DigitalOcean project this + knowledge base will belong to. + "region": "str", # Optional. The datacenter region to deploy the knowledge + base in. 
+ "reranking_config": { + "enabled": bool, # Optional. Whether reranking is enabled for + retrieval. + "model": "str" # Optional. Reranker model internal name. + }, + "size": "OPEN_SEARCH_PLAN_SIZE_UNSPECIFIED", # Optional. Default value is + "OPEN_SEARCH_PLAN_SIZE_UNSPECIFIED". Known values are: + "OPEN_SEARCH_PLAN_SIZE_UNSPECIFIED", "OPEN_SEARCH_PLAN_SIZE_SMALL", + "OPEN_SEARCH_PLAN_SIZE_MEDIUM", "OPEN_SEARCH_PLAN_SIZE_LARGE", and + "OPEN_SEARCH_PLAN_SIZE_EXTRA_LARGE". + "tags": [ + "str" # Optional. Tags to organize your knowledge base. + ], + "vpc_uuid": "str" # Optional. The VPC to deploy the knowledge base database + in. + } + + # response body for status code(s): 200 + response == { + "knowledge_base": { + "added_to_agent_at": "2020-02-20 00:00:00", # Optional. Time when + the knowledge base was added to the agent. + "created_at": "2020-02-20 00:00:00", # Optional. Creation date / + time. + "database_id": "str", # Optional. Knowledgebase Description. + "embedding_model_uuid": "str", # Optional. Knowledgebase + Description. + "is_public": bool, # Optional. Whether the knowledge base is public + or not. + "last_indexing_job": { + "completed_datasources": 0, # Optional. Number of + datasources indexed completed. + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "data_source_jobs": [ + { + "completed_at": "2020-02-20 00:00:00", # + Optional. Timestamp when data source completed indexing. + "data_source_uuid": "str", # Optional. Uuid + of the indexed data source. + "error_details": "str", # Optional. A + detailed error description. + "error_msg": "str", # Optional. A string + code provinding a hint which part of the system experienced an + error. + "failed_item_count": "str", # Optional. + Total count of files that have failed. + "indexed_file_count": "str", # Optional. + Total count of files that have been indexed. + "indexed_item_count": "str", # Optional. + Total count of files that have been indexed. 
+ "removed_item_count": "str", # Optional. + Total count of files that have been removed. + "skipped_item_count": "str", # Optional. + Total count of files that have been skipped. + "started_at": "2020-02-20 00:00:00", # + Optional. Timestamp when data source started indexing. + "status": "DATA_SOURCE_STATUS_UNKNOWN", # + Optional. Default value is "DATA_SOURCE_STATUS_UNKNOWN". Known + values are: "DATA_SOURCE_STATUS_UNKNOWN", + "DATA_SOURCE_STATUS_IN_PROGRESS", "DATA_SOURCE_STATUS_UPDATED", + "DATA_SOURCE_STATUS_PARTIALLY_UPDATED", + "DATA_SOURCE_STATUS_NOT_UPDATED", "DATA_SOURCE_STATUS_FAILED", + and "DATA_SOURCE_STATUS_CANCELLED". + "total_bytes": "str", # Optional. Total size + of files in data source in bytes. + "total_bytes_indexed": "str", # Optional. + Total size of files in data source in bytes that have been + indexed. + "total_file_count": "str" # Optional. Total + file count in the data source. + } + ], + "data_source_uuids": [ + "str" # Optional. IndexingJob description. + ], + "finished_at": "2020-02-20 00:00:00", # Optional. + IndexingJob description. + "is_report_available": bool, # Optional. Boolean value to + determine if the indexing job details are available. + "knowledge_base_uuid": "str", # Optional. Knowledge base id. + "phase": "BATCH_JOB_PHASE_UNKNOWN", # Optional. Default + value is "BATCH_JOB_PHASE_UNKNOWN". Known values are: + "BATCH_JOB_PHASE_UNKNOWN", "BATCH_JOB_PHASE_PENDING", + "BATCH_JOB_PHASE_RUNNING", "BATCH_JOB_PHASE_SUCCEEDED", + "BATCH_JOB_PHASE_FAILED", "BATCH_JOB_PHASE_ERROR", and + "BATCH_JOB_PHASE_CANCELLED". + "started_at": "2020-02-20 00:00:00", # Optional. IndexingJob + description. + "status": "INDEX_JOB_STATUS_UNKNOWN", # Optional. Default + value is "INDEX_JOB_STATUS_UNKNOWN". 
Known values are: + "INDEX_JOB_STATUS_UNKNOWN", "INDEX_JOB_STATUS_PARTIAL", + "INDEX_JOB_STATUS_IN_PROGRESS", "INDEX_JOB_STATUS_COMPLETED", + "INDEX_JOB_STATUS_FAILED", "INDEX_JOB_STATUS_NO_CHANGES", + "INDEX_JOB_STATUS_PENDING", and "INDEX_JOB_STATUS_CANCELLED". + "tokens": 0, # Optional. Number of tokens [This field is + deprecated]. + "total_datasources": 0, # Optional. Number of datasources + being indexed. + "total_tokens": "str", # Optional. Total Tokens Consumed By + the Indexing Job. + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, + "name": "str", # Optional. Name of knowledge base. + "project_id": "str", # Optional. Knowledgebase Description. + "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether reranking is enabled + for retrieval. + "model": "str" # Optional. Reranker model internal name. }, - "name": "str", # Optional. Name of knowledge base. - "project_id": "str", # Optional. Knowledgebase Description. - "region": "str", # Optional. Region code. "tags": [ "str" # Optional. Tags to organize related resources. ], @@ -248912,18 +260902,14 @@ def list_knowledge_base_data_sources( }, "bucket_name": "str", # Optional. Name of storage bucket - Deprecated, moved to data_source_details. - "chunking_algorithm": "CHUNKING_ALGORITHM_SECTION_BASED", # - Optional. Default value is "CHUNKING_ALGORITHM_SECTION_BASED". The - chunking algorithm to use for processing data sources. **Note: This - feature requires enabling the knowledgebase enhancements feature preview - flag.**. Known values are: "CHUNKING_ALGORITHM_UNKNOWN", - "CHUNKING_ALGORITHM_SECTION_BASED", "CHUNKING_ALGORITHM_HIERARCHICAL", - "CHUNKING_ALGORITHM_SEMANTIC", and "CHUNKING_ALGORITHM_FIXED_LENGTH". + "chunking_algorithm": "CHUNKING_ALGORITHM_UNKNOWN", # + Optional. Default value is "CHUNKING_ALGORITHM_UNKNOWN". 
Known values + are: "CHUNKING_ALGORITHM_UNKNOWN", "CHUNKING_ALGORITHM_SECTION_BASED", + "CHUNKING_ALGORITHM_HIERARCHICAL", "CHUNKING_ALGORITHM_SEMANTIC", and + "CHUNKING_ALGORITHM_FIXED_LENGTH". "chunking_options": { - "child_chunk_size": 0, # Optional. Hierarchical - options. - "max_chunk_size": 0, # Optional. Section_Based and - Fixed_Length options. + "child_chunk_size": 0, # Optional. The data sources. + "max_chunk_size": 0, # Optional. Common options. "parent_chunk_size": 0, # Optional. Hierarchical options. "semantic_threshold": 0.0 # Optional. Semantic @@ -249162,17 +261148,14 @@ def create_knowledge_base_data_source( "region": "str", # Optional. Region of bucket. "secret_key": "str" # Optional. The AWS Secret Key. }, - "chunking_algorithm": "CHUNKING_ALGORITHM_SECTION_BASED", # Optional. - Default value is "CHUNKING_ALGORITHM_SECTION_BASED". The chunking algorithm to - use for processing data sources. **Note: This feature requires enabling the - knowledgebase enhancements feature preview flag.**. Known values are: + "chunking_algorithm": "CHUNKING_ALGORITHM_UNKNOWN", # Optional. Default + value is "CHUNKING_ALGORITHM_UNKNOWN". Known values are: "CHUNKING_ALGORITHM_UNKNOWN", "CHUNKING_ALGORITHM_SECTION_BASED", "CHUNKING_ALGORITHM_HIERARCHICAL", "CHUNKING_ALGORITHM_SEMANTIC", and "CHUNKING_ALGORITHM_FIXED_LENGTH". "chunking_options": { - "child_chunk_size": 0, # Optional. Hierarchical options. - "max_chunk_size": 0, # Optional. Section_Based and Fixed_Length - options. + "child_chunk_size": 0, # Optional. + "max_chunk_size": 0, # Optional. Common options. "parent_chunk_size": 0, # Optional. Hierarchical options. "semantic_threshold": 0.0 # Optional. Semantic options. }, @@ -249212,17 +261195,15 @@ def create_knowledge_base_data_source( }, "bucket_name": "str", # Optional. Name of storage bucket - Deprecated, moved to data_source_details. - "chunking_algorithm": "CHUNKING_ALGORITHM_SECTION_BASED", # - Optional. 
Default value is "CHUNKING_ALGORITHM_SECTION_BASED". The chunking - algorithm to use for processing data sources. **Note: This feature requires - enabling the knowledgebase enhancements feature preview flag.**. Known values - are: "CHUNKING_ALGORITHM_UNKNOWN", "CHUNKING_ALGORITHM_SECTION_BASED", + "chunking_algorithm": "CHUNKING_ALGORITHM_UNKNOWN", # Optional. + Default value is "CHUNKING_ALGORITHM_UNKNOWN". Known values are: + "CHUNKING_ALGORITHM_UNKNOWN", "CHUNKING_ALGORITHM_SECTION_BASED", "CHUNKING_ALGORITHM_HIERARCHICAL", "CHUNKING_ALGORITHM_SEMANTIC", and "CHUNKING_ALGORITHM_FIXED_LENGTH". "chunking_options": { - "child_chunk_size": 0, # Optional. Hierarchical options. - "max_chunk_size": 0, # Optional. Section_Based and - Fixed_Length options. + "child_chunk_size": 0, # Optional. Data Source configuration + for Knowledge Bases. + "max_chunk_size": 0, # Optional. Common options. "parent_chunk_size": 0, # Optional. Hierarchical options. "semantic_threshold": 0.0 # Optional. Semantic options. }, @@ -249362,17 +261343,15 @@ def create_knowledge_base_data_source( }, "bucket_name": "str", # Optional. Name of storage bucket - Deprecated, moved to data_source_details. - "chunking_algorithm": "CHUNKING_ALGORITHM_SECTION_BASED", # - Optional. Default value is "CHUNKING_ALGORITHM_SECTION_BASED". The chunking - algorithm to use for processing data sources. **Note: This feature requires - enabling the knowledgebase enhancements feature preview flag.**. Known values - are: "CHUNKING_ALGORITHM_UNKNOWN", "CHUNKING_ALGORITHM_SECTION_BASED", + "chunking_algorithm": "CHUNKING_ALGORITHM_UNKNOWN", # Optional. + Default value is "CHUNKING_ALGORITHM_UNKNOWN". Known values are: + "CHUNKING_ALGORITHM_UNKNOWN", "CHUNKING_ALGORITHM_SECTION_BASED", "CHUNKING_ALGORITHM_HIERARCHICAL", "CHUNKING_ALGORITHM_SEMANTIC", and "CHUNKING_ALGORITHM_FIXED_LENGTH". "chunking_options": { - "child_chunk_size": 0, # Optional. Hierarchical options. - "max_chunk_size": 0, # Optional. 
Section_Based and - Fixed_Length options. + "child_chunk_size": 0, # Optional. Data Source configuration + for Knowledge Bases. + "max_chunk_size": 0, # Optional. Common options. "parent_chunk_size": 0, # Optional. Hierarchical options. "semantic_threshold": 0.0 # Optional. Semantic options. }, @@ -249505,17 +261484,14 @@ def create_knowledge_base_data_source( "region": "str", # Optional. Region of bucket. "secret_key": "str" # Optional. The AWS Secret Key. }, - "chunking_algorithm": "CHUNKING_ALGORITHM_SECTION_BASED", # Optional. - Default value is "CHUNKING_ALGORITHM_SECTION_BASED". The chunking algorithm to - use for processing data sources. **Note: This feature requires enabling the - knowledgebase enhancements feature preview flag.**. Known values are: + "chunking_algorithm": "CHUNKING_ALGORITHM_UNKNOWN", # Optional. Default + value is "CHUNKING_ALGORITHM_UNKNOWN". Known values are: "CHUNKING_ALGORITHM_UNKNOWN", "CHUNKING_ALGORITHM_SECTION_BASED", "CHUNKING_ALGORITHM_HIERARCHICAL", "CHUNKING_ALGORITHM_SEMANTIC", and "CHUNKING_ALGORITHM_FIXED_LENGTH". "chunking_options": { - "child_chunk_size": 0, # Optional. Hierarchical options. - "max_chunk_size": 0, # Optional. Section_Based and Fixed_Length - options. + "child_chunk_size": 0, # Optional. + "max_chunk_size": 0, # Optional. Common options. "parent_chunk_size": 0, # Optional. Hierarchical options. "semantic_threshold": 0.0 # Optional. Semantic options. }, @@ -249555,17 +261531,15 @@ def create_knowledge_base_data_source( }, "bucket_name": "str", # Optional. Name of storage bucket - Deprecated, moved to data_source_details. - "chunking_algorithm": "CHUNKING_ALGORITHM_SECTION_BASED", # - Optional. Default value is "CHUNKING_ALGORITHM_SECTION_BASED". The chunking - algorithm to use for processing data sources. **Note: This feature requires - enabling the knowledgebase enhancements feature preview flag.**. 
Known values - are: "CHUNKING_ALGORITHM_UNKNOWN", "CHUNKING_ALGORITHM_SECTION_BASED", + "chunking_algorithm": "CHUNKING_ALGORITHM_UNKNOWN", # Optional. + Default value is "CHUNKING_ALGORITHM_UNKNOWN". Known values are: + "CHUNKING_ALGORITHM_UNKNOWN", "CHUNKING_ALGORITHM_SECTION_BASED", "CHUNKING_ALGORITHM_HIERARCHICAL", "CHUNKING_ALGORITHM_SEMANTIC", and "CHUNKING_ALGORITHM_FIXED_LENGTH". "chunking_options": { - "child_chunk_size": 0, # Optional. Hierarchical options. - "max_chunk_size": 0, # Optional. Section_Based and - Fixed_Length options. + "child_chunk_size": 0, # Optional. Data Source configuration + for Knowledge Bases. + "max_chunk_size": 0, # Optional. Common options. "parent_chunk_size": 0, # Optional. Hierarchical options. "semantic_threshold": 0.0 # Optional. Semantic options. }, @@ -249793,17 +261767,14 @@ def update_knowledge_base_data_source( # JSON input template you can fill out and use as your body input. body = { - "chunking_algorithm": "CHUNKING_ALGORITHM_SECTION_BASED", # Optional. - Default value is "CHUNKING_ALGORITHM_SECTION_BASED". The chunking algorithm to - use for processing data sources. **Note: This feature requires enabling the - knowledgebase enhancements feature preview flag.**. Known values are: + "chunking_algorithm": "CHUNKING_ALGORITHM_UNKNOWN", # Optional. Default + value is "CHUNKING_ALGORITHM_UNKNOWN". Known values are: "CHUNKING_ALGORITHM_UNKNOWN", "CHUNKING_ALGORITHM_SECTION_BASED", "CHUNKING_ALGORITHM_HIERARCHICAL", "CHUNKING_ALGORITHM_SEMANTIC", and "CHUNKING_ALGORITHM_FIXED_LENGTH". "chunking_options": { - "child_chunk_size": 0, # Optional. Hierarchical options. - "max_chunk_size": 0, # Optional. Section_Based and Fixed_Length - options. + "child_chunk_size": 0, # Optional. + "max_chunk_size": 0, # Optional. Common options. "parent_chunk_size": 0, # Optional. Hierarchical options. "semantic_threshold": 0.0 # Optional. Semantic options. 
}, @@ -249822,17 +261793,15 @@ def update_knowledge_base_data_source( }, "bucket_name": "str", # Optional. Name of storage bucket - Deprecated, moved to data_source_details. - "chunking_algorithm": "CHUNKING_ALGORITHM_SECTION_BASED", # - Optional. Default value is "CHUNKING_ALGORITHM_SECTION_BASED". The chunking - algorithm to use for processing data sources. **Note: This feature requires - enabling the knowledgebase enhancements feature preview flag.**. Known values - are: "CHUNKING_ALGORITHM_UNKNOWN", "CHUNKING_ALGORITHM_SECTION_BASED", + "chunking_algorithm": "CHUNKING_ALGORITHM_UNKNOWN", # Optional. + Default value is "CHUNKING_ALGORITHM_UNKNOWN". Known values are: + "CHUNKING_ALGORITHM_UNKNOWN", "CHUNKING_ALGORITHM_SECTION_BASED", "CHUNKING_ALGORITHM_HIERARCHICAL", "CHUNKING_ALGORITHM_SEMANTIC", and "CHUNKING_ALGORITHM_FIXED_LENGTH". "chunking_options": { - "child_chunk_size": 0, # Optional. Hierarchical options. - "max_chunk_size": 0, # Optional. Section_Based and - Fixed_Length options. + "child_chunk_size": 0, # Optional. Data Source configuration + for Knowledge Bases. + "max_chunk_size": 0, # Optional. Common options. "parent_chunk_size": 0, # Optional. Hierarchical options. "semantic_threshold": 0.0 # Optional. Semantic options. }, @@ -249975,17 +261944,15 @@ def update_knowledge_base_data_source( }, "bucket_name": "str", # Optional. Name of storage bucket - Deprecated, moved to data_source_details. - "chunking_algorithm": "CHUNKING_ALGORITHM_SECTION_BASED", # - Optional. Default value is "CHUNKING_ALGORITHM_SECTION_BASED". The chunking - algorithm to use for processing data sources. **Note: This feature requires - enabling the knowledgebase enhancements feature preview flag.**. Known values - are: "CHUNKING_ALGORITHM_UNKNOWN", "CHUNKING_ALGORITHM_SECTION_BASED", + "chunking_algorithm": "CHUNKING_ALGORITHM_UNKNOWN", # Optional. + Default value is "CHUNKING_ALGORITHM_UNKNOWN". 
Known values are: + "CHUNKING_ALGORITHM_UNKNOWN", "CHUNKING_ALGORITHM_SECTION_BASED", "CHUNKING_ALGORITHM_HIERARCHICAL", "CHUNKING_ALGORITHM_SEMANTIC", and "CHUNKING_ALGORITHM_FIXED_LENGTH". "chunking_options": { - "child_chunk_size": 0, # Optional. Hierarchical options. - "max_chunk_size": 0, # Optional. Section_Based and - Fixed_Length options. + "child_chunk_size": 0, # Optional. Data Source configuration + for Knowledge Bases. + "max_chunk_size": 0, # Optional. Common options. "parent_chunk_size": 0, # Optional. Hierarchical options. "semantic_threshold": 0.0 # Optional. Semantic options. }, @@ -250114,17 +262081,14 @@ def update_knowledge_base_data_source( # JSON input template you can fill out and use as your body input. body = { - "chunking_algorithm": "CHUNKING_ALGORITHM_SECTION_BASED", # Optional. - Default value is "CHUNKING_ALGORITHM_SECTION_BASED". The chunking algorithm to - use for processing data sources. **Note: This feature requires enabling the - knowledgebase enhancements feature preview flag.**. Known values are: + "chunking_algorithm": "CHUNKING_ALGORITHM_UNKNOWN", # Optional. Default + value is "CHUNKING_ALGORITHM_UNKNOWN". Known values are: "CHUNKING_ALGORITHM_UNKNOWN", "CHUNKING_ALGORITHM_SECTION_BASED", "CHUNKING_ALGORITHM_HIERARCHICAL", "CHUNKING_ALGORITHM_SEMANTIC", and "CHUNKING_ALGORITHM_FIXED_LENGTH". "chunking_options": { - "child_chunk_size": 0, # Optional. Hierarchical options. - "max_chunk_size": 0, # Optional. Section_Based and Fixed_Length - options. + "child_chunk_size": 0, # Optional. + "max_chunk_size": 0, # Optional. Common options. "parent_chunk_size": 0, # Optional. Hierarchical options. "semantic_threshold": 0.0 # Optional. Semantic options. }, @@ -250143,17 +262107,15 @@ def update_knowledge_base_data_source( }, "bucket_name": "str", # Optional. Name of storage bucket - Deprecated, moved to data_source_details. - "chunking_algorithm": "CHUNKING_ALGORITHM_SECTION_BASED", # - Optional. 
Default value is "CHUNKING_ALGORITHM_SECTION_BASED". The chunking - algorithm to use for processing data sources. **Note: This feature requires - enabling the knowledgebase enhancements feature preview flag.**. Known values - are: "CHUNKING_ALGORITHM_UNKNOWN", "CHUNKING_ALGORITHM_SECTION_BASED", + "chunking_algorithm": "CHUNKING_ALGORITHM_UNKNOWN", # Optional. + Default value is "CHUNKING_ALGORITHM_UNKNOWN". Known values are: + "CHUNKING_ALGORITHM_UNKNOWN", "CHUNKING_ALGORITHM_SECTION_BASED", "CHUNKING_ALGORITHM_HIERARCHICAL", "CHUNKING_ALGORITHM_SEMANTIC", and "CHUNKING_ALGORITHM_FIXED_LENGTH". "chunking_options": { - "child_chunk_size": 0, # Optional. Hierarchical options. - "max_chunk_size": 0, # Optional. Section_Based and - Fixed_Length options. + "child_chunk_size": 0, # Optional. Data Source configuration + for Knowledge Bases. + "max_chunk_size": 0, # Optional. Common options. "parent_chunk_size": 0, # Optional. Hierarchical options. "semantic_threshold": 0.0 # Optional. Semantic options. }, @@ -250690,7 +262652,7 @@ def get_knowledge_base(self, uuid: str, **kwargs: Any) -> JSON: "database_status": "CREATING", # Optional. Default value is "CREATING". Known values are: "CREATING", "ONLINE", "POWEROFF", "REBUILDING", "REBALANCING", "DECOMMISSIONED", "FORKING", "MIGRATING", "RESIZING", "RESTORING", "POWERING_ON", - and "UNHEALTHY". + "UNHEALTHY", and "UPGRADING". "knowledge_base": { "added_to_agent_at": "2020-02-20 00:00:00", # Optional. Time when the knowledge base was added to the agent. @@ -250780,6 +262742,11 @@ def get_knowledge_base(self, uuid: str, **kwargs: Any) -> JSON: "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledgebase Description. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether reranking is enabled + for retrieval. + "model": "str" # Optional. Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. 
], @@ -250910,12 +262877,15 @@ def update_knowledge_base( # JSON input template you can fill out and use as your body input. body = { "database_id": "str", # Optional. The id of the DigitalOcean database this - knowledge base will use, optiona. - "embedding_model_uuid": "str", # Optional. Identifier for the foundation - model. + knowledge base will use, optional. "name": "str", # Optional. Knowledge base name. "project_id": "str", # Optional. The id of the DigitalOcean project this knowledge base will belong to. + "reranking_config": { + "enabled": bool, # Optional. Whether reranking is enabled for + retrieval. + "model": "str" # Optional. Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize your knowledge base. ], @@ -251013,147 +262983,157 @@ def update_knowledge_base( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledgebase Description. "region": "str", # Optional. Region code. - "tags": [ - "str" # Optional. Tags to organize related resources. - ], - "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. - "user_id": "str", # Optional. Id of user that created the knowledge - base. - "uuid": "str" # Optional. Unique id for knowledge base. - } - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. 
- } - """ - - @overload - def update_knowledge_base( - self, - uuid: str, - body: Optional[IO[bytes]] = None, - *, - content_type: str = "application/json", - **kwargs: Any, - ) -> JSON: - # pylint: disable=line-too-long - """Update a Knowledge Base. - - To update a knowledge base, send a PUT request to ``/v2/gen-ai/knowledge_bases/{uuid}``. - - :param uuid: Knowledge base id. Required. - :type uuid: str - :param body: Default value is None. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "knowledge_base": { - "added_to_agent_at": "2020-02-20 00:00:00", # Optional. Time when - the knowledge base was added to the agent. - "created_at": "2020-02-20 00:00:00", # Optional. Creation date / - time. - "database_id": "str", # Optional. Knowledgebase Description. - "embedding_model_uuid": "str", # Optional. Knowledgebase - Description. - "is_public": bool, # Optional. Whether the knowledge base is public - or not. - "last_indexing_job": { - "completed_datasources": 0, # Optional. Number of - datasources indexed completed. - "created_at": "2020-02-20 00:00:00", # Optional. Creation - date / time. - "data_source_jobs": [ - { - "completed_at": "2020-02-20 00:00:00", # - Optional. Timestamp when data source completed indexing. - "data_source_uuid": "str", # Optional. Uuid - of the indexed data source. - "error_details": "str", # Optional. A - detailed error description. - "error_msg": "str", # Optional. A string - code provinding a hint which part of the system experienced an - error. - "failed_item_count": "str", # Optional. - Total count of files that have failed. - "indexed_file_count": "str", # Optional. - Total count of files that have been indexed. 
- "indexed_item_count": "str", # Optional. - Total count of files that have been indexed. - "removed_item_count": "str", # Optional. - Total count of files that have been removed. - "skipped_item_count": "str", # Optional. - Total count of files that have been skipped. - "started_at": "2020-02-20 00:00:00", # - Optional. Timestamp when data source started indexing. - "status": "DATA_SOURCE_STATUS_UNKNOWN", # - Optional. Default value is "DATA_SOURCE_STATUS_UNKNOWN". Known - values are: "DATA_SOURCE_STATUS_UNKNOWN", - "DATA_SOURCE_STATUS_IN_PROGRESS", "DATA_SOURCE_STATUS_UPDATED", - "DATA_SOURCE_STATUS_PARTIALLY_UPDATED", - "DATA_SOURCE_STATUS_NOT_UPDATED", "DATA_SOURCE_STATUS_FAILED", - and "DATA_SOURCE_STATUS_CANCELLED". - "total_bytes": "str", # Optional. Total size - of files in data source in bytes. - "total_bytes_indexed": "str", # Optional. - Total size of files in data source in bytes that have been - indexed. - "total_file_count": "str" # Optional. Total - file count in the data source. - } - ], - "data_source_uuids": [ - "str" # Optional. IndexingJob description. - ], - "finished_at": "2020-02-20 00:00:00", # Optional. - IndexingJob description. - "is_report_available": bool, # Optional. Boolean value to - determine if the indexing job details are available. - "knowledge_base_uuid": "str", # Optional. Knowledge base id. - "phase": "BATCH_JOB_PHASE_UNKNOWN", # Optional. Default - value is "BATCH_JOB_PHASE_UNKNOWN". Known values are: - "BATCH_JOB_PHASE_UNKNOWN", "BATCH_JOB_PHASE_PENDING", - "BATCH_JOB_PHASE_RUNNING", "BATCH_JOB_PHASE_SUCCEEDED", - "BATCH_JOB_PHASE_FAILED", "BATCH_JOB_PHASE_ERROR", and - "BATCH_JOB_PHASE_CANCELLED". - "started_at": "2020-02-20 00:00:00", # Optional. IndexingJob - description. - "status": "INDEX_JOB_STATUS_UNKNOWN", # Optional. Default - value is "INDEX_JOB_STATUS_UNKNOWN". 
Known values are: - "INDEX_JOB_STATUS_UNKNOWN", "INDEX_JOB_STATUS_PARTIAL", - "INDEX_JOB_STATUS_IN_PROGRESS", "INDEX_JOB_STATUS_COMPLETED", - "INDEX_JOB_STATUS_FAILED", "INDEX_JOB_STATUS_NO_CHANGES", - "INDEX_JOB_STATUS_PENDING", and "INDEX_JOB_STATUS_CANCELLED". - "tokens": 0, # Optional. Number of tokens [This field is - deprecated]. - "total_datasources": 0, # Optional. Number of datasources - being indexed. - "total_tokens": "str", # Optional. Total Tokens Consumed By - the Indexing Job. - "updated_at": "2020-02-20 00:00:00", # Optional. Last - modified. - "uuid": "str" # Optional. Unique id. + "reranking_config": { + "enabled": bool, # Optional. Whether reranking is enabled + for retrieval. + "model": "str" # Optional. Reranker model internal name. + }, + "tags": [ + "str" # Optional. Tags to organize related resources. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. + "user_id": "str", # Optional. Id of user that created the knowledge + base. + "uuid": "str" # Optional. Unique id for knowledge base. + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + def update_knowledge_base( + self, + uuid: str, + body: Optional[IO[bytes]] = None, + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """Update a Knowledge Base. + + To update a knowledge base, send a PUT request to ``/v2/gen-ai/knowledge_bases/{uuid}``. 
+ + :param uuid: Knowledge base id. Required. + :type uuid: str + :param body: Default value is None. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "knowledge_base": { + "added_to_agent_at": "2020-02-20 00:00:00", # Optional. Time when + the knowledge base was added to the agent. + "created_at": "2020-02-20 00:00:00", # Optional. Creation date / + time. + "database_id": "str", # Optional. Knowledgebase Description. + "embedding_model_uuid": "str", # Optional. Knowledgebase + Description. + "is_public": bool, # Optional. Whether the knowledge base is public + or not. + "last_indexing_job": { + "completed_datasources": 0, # Optional. Number of + datasources indexed completed. + "created_at": "2020-02-20 00:00:00", # Optional. Creation + date / time. + "data_source_jobs": [ + { + "completed_at": "2020-02-20 00:00:00", # + Optional. Timestamp when data source completed indexing. + "data_source_uuid": "str", # Optional. Uuid + of the indexed data source. + "error_details": "str", # Optional. A + detailed error description. + "error_msg": "str", # Optional. A string + code provinding a hint which part of the system experienced an + error. + "failed_item_count": "str", # Optional. + Total count of files that have failed. + "indexed_file_count": "str", # Optional. + Total count of files that have been indexed. + "indexed_item_count": "str", # Optional. + Total count of files that have been indexed. + "removed_item_count": "str", # Optional. + Total count of files that have been removed. + "skipped_item_count": "str", # Optional. + Total count of files that have been skipped. + "started_at": "2020-02-20 00:00:00", # + Optional. 
Timestamp when data source started indexing. + "status": "DATA_SOURCE_STATUS_UNKNOWN", # + Optional. Default value is "DATA_SOURCE_STATUS_UNKNOWN". Known + values are: "DATA_SOURCE_STATUS_UNKNOWN", + "DATA_SOURCE_STATUS_IN_PROGRESS", "DATA_SOURCE_STATUS_UPDATED", + "DATA_SOURCE_STATUS_PARTIALLY_UPDATED", + "DATA_SOURCE_STATUS_NOT_UPDATED", "DATA_SOURCE_STATUS_FAILED", + and "DATA_SOURCE_STATUS_CANCELLED". + "total_bytes": "str", # Optional. Total size + of files in data source in bytes. + "total_bytes_indexed": "str", # Optional. + Total size of files in data source in bytes that have been + indexed. + "total_file_count": "str" # Optional. Total + file count in the data source. + } + ], + "data_source_uuids": [ + "str" # Optional. IndexingJob description. + ], + "finished_at": "2020-02-20 00:00:00", # Optional. + IndexingJob description. + "is_report_available": bool, # Optional. Boolean value to + determine if the indexing job details are available. + "knowledge_base_uuid": "str", # Optional. Knowledge base id. + "phase": "BATCH_JOB_PHASE_UNKNOWN", # Optional. Default + value is "BATCH_JOB_PHASE_UNKNOWN". Known values are: + "BATCH_JOB_PHASE_UNKNOWN", "BATCH_JOB_PHASE_PENDING", + "BATCH_JOB_PHASE_RUNNING", "BATCH_JOB_PHASE_SUCCEEDED", + "BATCH_JOB_PHASE_FAILED", "BATCH_JOB_PHASE_ERROR", and + "BATCH_JOB_PHASE_CANCELLED". + "started_at": "2020-02-20 00:00:00", # Optional. IndexingJob + description. + "status": "INDEX_JOB_STATUS_UNKNOWN", # Optional. Default + value is "INDEX_JOB_STATUS_UNKNOWN". Known values are: + "INDEX_JOB_STATUS_UNKNOWN", "INDEX_JOB_STATUS_PARTIAL", + "INDEX_JOB_STATUS_IN_PROGRESS", "INDEX_JOB_STATUS_COMPLETED", + "INDEX_JOB_STATUS_FAILED", "INDEX_JOB_STATUS_NO_CHANGES", + "INDEX_JOB_STATUS_PENDING", and "INDEX_JOB_STATUS_CANCELLED". + "tokens": 0, # Optional. Number of tokens [This field is + deprecated]. + "total_datasources": 0, # Optional. Number of datasources + being indexed. + "total_tokens": "str", # Optional. 
Total Tokens Consumed By + the Indexing Job. + "updated_at": "2020-02-20 00:00:00", # Optional. Last + modified. + "uuid": "str" # Optional. Unique id. + }, + "name": "str", # Optional. Name of knowledge base. + "project_id": "str", # Optional. Knowledgebase Description. + "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether reranking is enabled + for retrieval. + "model": "str" # Optional. Reranker model internal name. }, - "name": "str", # Optional. Name of knowledge base. - "project_id": "str", # Optional. Knowledgebase Description. - "region": "str", # Optional. Region code. "tags": [ "str" # Optional. Tags to organize related resources. ], @@ -251199,12 +263179,15 @@ def update_knowledge_base( # JSON input template you can fill out and use as your body input. body = { "database_id": "str", # Optional. The id of the DigitalOcean database this - knowledge base will use, optiona. - "embedding_model_uuid": "str", # Optional. Identifier for the foundation - model. + knowledge base will use, optional. "name": "str", # Optional. Knowledge base name. "project_id": "str", # Optional. The id of the DigitalOcean project this knowledge base will belong to. + "reranking_config": { + "enabled": bool, # Optional. Whether reranking is enabled for + retrieval. + "model": "str" # Optional. Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize your knowledge base. ], @@ -251302,6 +263285,11 @@ def update_knowledge_base( "name": "str", # Optional. Name of knowledge base. "project_id": "str", # Optional. Knowledgebase Description. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether reranking is enabled + for retrieval. + "model": "str" # Optional. Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. 
], @@ -251528,6 +263516,1475 @@ def delete_knowledge_base(self, uuid: str, **kwargs: Any) -> JSON: return cast(JSON, deserialized) # type: ignore + @overload + def create_model_eval_dataset_upload_presigned_urls( # pylint: disable=name-too-long + self, + body: Optional[JSON] = None, + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """Create Presigned URLs for Model Evaluation Dataset File Upload. + + To create presigned URLs for model evaluation dataset file upload, send a POST request to + ``/v2/genai/model_evaluation/datasets/file_upload_presigned_urls``. + + :param body: Default value is None. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "files": [ + { + "file_name": "str", # Optional. Local filename. + "file_size": "str" # Optional. The size of the file in + bytes. + } + ] + } + + # response body for status code(s): 200 + response == { + "request_id": "str", # Optional. The ID generated for the request for + Presigned URLs. + "uploads": [ + { + "expires_at": "2020-02-20 00:00:00", # Optional. The time + the url expires at. + "object_key": "str", # Optional. The unique object key to + store the file as. + "original_file_name": "str", # Optional. The original file + name. + "presigned_url": "str" # Optional. The actual presigned URL + the client can use to upload the file directly. + } + ] + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. 
+ "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + def create_model_eval_dataset_upload_presigned_urls( # pylint: disable=name-too-long + self, + body: Optional[IO[bytes]] = None, + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """Create Presigned URLs for Model Evaluation Dataset File Upload. + + To create presigned URLs for model evaluation dataset file upload, send a POST request to + ``/v2/genai/model_evaluation/datasets/file_upload_presigned_urls``. + + :param body: Default value is None. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "request_id": "str", # Optional. The ID generated for the request for + Presigned URLs. + "uploads": [ + { + "expires_at": "2020-02-20 00:00:00", # Optional. The time + the url expires at. + "object_key": "str", # Optional. The unique object key to + store the file as. + "original_file_name": "str", # Optional. The original file + name. + "presigned_url": "str" # Optional. The actual presigned URL + the client can use to upload the file directly. + } + ] + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. 
+ "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace + def create_model_eval_dataset_upload_presigned_urls( # pylint: disable=name-too-long + self, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Create Presigned URLs for Model Evaluation Dataset File Upload. + + To create presigned URLs for model evaluation dataset file upload, send a POST request to + ``/v2/genai/model_evaluation/datasets/file_upload_presigned_urls``. + + :param body: Is either a JSON type or a IO[bytes] type. Default value is None. + :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "files": [ + { + "file_name": "str", # Optional. Local filename. + "file_size": "str" # Optional. The size of the file in + bytes. + } + ] + } + + # response body for status code(s): 200 + response == { + "request_id": "str", # Optional. The ID generated for the request for + Presigned URLs. + "uploads": [ + { + "expires_at": "2020-02-20 00:00:00", # Optional. The time + the url expires at. + "object_key": "str", # Optional. The unique object key to + store the file as. + "original_file_name": "str", # Optional. The original file + name. + "presigned_url": "str" # Optional. The actual presigned URL + the client can use to upload the file directly. + } + ] + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. 
For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[JSON] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + if body is not None: + _json = body + else: + _json = None + + _request = build_genai_create_model_eval_dataset_upload_presigned_urls_request( + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise 
HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace + def list_model_evaluation_metrics(self, **kwargs: Any) -> JSON: + # pylint: disable=line-too-long + """List Model Evaluation Metrics. + + To list all available metrics for model evaluation, send a GET request to + ``/v2/genai/model_evaluation_metrics``. + + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "metrics": [ + { + "category": "METRIC_CATEGORY_UNSPECIFIED", # Optional. + Default value is "METRIC_CATEGORY_UNSPECIFIED". Known values are: + "METRIC_CATEGORY_UNSPECIFIED", "METRIC_CATEGORY_CORRECTNESS", + "METRIC_CATEGORY_USER_OUTCOMES", "METRIC_CATEGORY_SAFETY_AND_SECURITY", + "METRIC_CATEGORY_CONTEXT_QUALITY", and "METRIC_CATEGORY_MODEL_FIT". 
+ "description": "str", # Optional. List of model evaluation + metrics. + "evaluation_scope": "EVALUATION_SCOPE_UNSPECIFIED", # + Optional. Default value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation or model + evaluation. For backwards compatibility, UNSPECIFIED defaults to agent + metrics only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". + "inverted": bool, # Optional. If true, the metric is + inverted, meaning that a lower value is better. + "is_metric_goal": bool, # Optional. List of model evaluation + metrics. + "metric_name": "str", # Optional. List of model evaluation + metrics. + "metric_rank": 0, # Optional. List of model evaluation + metrics. + "metric_type": "METRIC_TYPE_UNSPECIFIED", # Optional. + Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: + "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", and + "METRIC_TYPE_MODEL_SAFETY". + "metric_uuid": "str", # Optional. List of model evaluation + metrics. + "metric_value_type": "METRIC_VALUE_TYPE_UNSPECIFIED", # + Optional. Default value is "METRIC_VALUE_TYPE_UNSPECIFIED". Known values + are: "METRIC_VALUE_TYPE_UNSPECIFIED", "METRIC_VALUE_TYPE_NUMBER", + "METRIC_VALUE_TYPE_STRING", and "METRIC_VALUE_TYPE_PERCENTAGE". + "range_max": 0.0, # Optional. The maximum value for the + metric. + "range_min": 0.0 # Optional. The minimum value for the + metric. + } + ] + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. 
Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_genai_list_model_evaluation_metrics_request( + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + 
response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace + def list_model_evaluation_runs( + self, + *, + eval_preset_uuid: Optional[str] = None, + status: str = "MODEL_EVALUATION_RUN_STATUS_UNSPECIFIED", + page: Optional[int] = None, + per_page: Optional[int] = None, + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """List Model Evaluation Runs. + + To list model evaluation runs, send a GET request to ``/v2/genai/model_evaluation_runs``. + + :keyword eval_preset_uuid: UUID of the evaluation preset to filter by. Default value is None. + :paramtype eval_preset_uuid: str + :keyword status: Filter by evaluation run status. Known values are: + "MODEL_EVALUATION_RUN_STATUS_UNSPECIFIED", "MODEL_EVALUATION_RUN_QUEUED", + "MODEL_EVALUATION_RUN_RUNNING_DATASET", "MODEL_EVALUATION_RUN_EVALUATING_RESULTS", + "MODEL_EVALUATION_RUN_CANCELLING", "MODEL_EVALUATION_RUN_CANCELLED", + "MODEL_EVALUATION_RUN_SUCCESSFUL", "MODEL_EVALUATION_RUN_PARTIALLY_SUCCESSFUL", and + "MODEL_EVALUATION_RUN_FAILED". Default value is "MODEL_EVALUATION_RUN_STATUS_UNSPECIFIED". + :paramtype status: str + :keyword page: Page number. Default value is None. + :paramtype page: int + :keyword per_page: Items per page. Default value is None. + :paramtype per_page: int + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "links": { + "pages": { + "first": "str", # Optional. First page. + "last": "str", # Optional. Last page. 
+ "next": "str", # Optional. Next page. + "previous": "str" # Optional. Previous page. + } + }, + "meta": { + "page": 0, # Optional. The current page. + "pages": 0, # Optional. Total number of pages. + "total": 0 # Optional. Total amount of items over all pages. + }, + "runs": [ + { + "candidate_model_name": "str", # Optional. Name of the + candidate model being evaluated. + "candidate_model_source": + "CANDIDATE_MODEL_SOURCE_SERVERLESS", # Optional. Default value is + "CANDIDATE_MODEL_SOURCE_SERVERLESS". Whether inference runs against the + serverless platform, a dedicated deployment, or a model router. Known + values are: "CANDIDATE_MODEL_SOURCE_SERVERLESS", + "CANDIDATE_MODEL_SOURCE_DEDICATED", and "CANDIDATE_MODEL_SOURCE_ROUTER". + "candidate_model_uuid": "str", # Optional. UUID of the + candidate model being evaluated. + "created_at": "2020-02-20 00:00:00", # Optional. Timestamp + when the run was created. + "dataset_name": "str", # Optional. Name of the dataset used + for evaluation. + "dataset_uuid": "str", # Optional. UUID of the dataset used + for evaluation. + "eval_run_uuid": "str", # Optional. UUID of the evaluation + run. + "judge_model_name": "str", # Optional. Summary view of + evaluation runs for the run history list. + "judge_model_uuid": "str", # Optional. Judge model used to + score responses. + "name": "str", # Optional. Name of the evaluation run. + "status": "MODEL_EVALUATION_RUN_STATUS_UNSPECIFIED" # + Optional. Default value is "MODEL_EVALUATION_RUN_STATUS_UNSPECIFIED". + Model Evaluation Run Statuses. Known values are: + "MODEL_EVALUATION_RUN_STATUS_UNSPECIFIED", "MODEL_EVALUATION_RUN_QUEUED", + "MODEL_EVALUATION_RUN_RUNNING_DATASET", + "MODEL_EVALUATION_RUN_EVALUATING_RESULTS", + "MODEL_EVALUATION_RUN_CANCELLING", "MODEL_EVALUATION_RUN_CANCELLED", + "MODEL_EVALUATION_RUN_SUCCESSFUL", + "MODEL_EVALUATION_RUN_PARTIALLY_SUCCESSFUL", and + "MODEL_EVALUATION_RUN_FAILED". 
+ } + ] + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_genai_list_model_evaluation_runs_request( + eval_preset_uuid=eval_preset_uuid, + status=status, + page=page, + per_page=per_page, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", 
response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @overload + def create_model_evaluation_run( + self, + body: Optional[JSON] = None, + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """Create Model Evaluation Run. + + To create a model evaluation run, send a POST request to ``/v2/genai/model_evaluation_runs``. + + :param body: Default value is None. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "candidate_inference_config": { + "max_tokens": 0, # Optional. Inference configuration for the + candidate model during evaluation. + "stop_token": "str", # Optional. Inference configuration for the + candidate model during evaluation. 
+ "system_prompt": "str", # Optional. Inference configuration for the + candidate model during evaluation. + "temperature": 0.0 # Optional. Inference configuration for the + candidate model during evaluation. + }, + "candidate_model_name": "str", # Optional. Model slug used to call the + candidate model API. For dedicated inference, this is the model slug from the + deployment. For serverless, this should match the model's internal name. + "candidate_model_source": "CANDIDATE_MODEL_SOURCE_SERVERLESS", # Optional. + Default value is "CANDIDATE_MODEL_SOURCE_SERVERLESS". Whether inference runs + against the serverless platform, a dedicated deployment, or a model router. Known + values are: "CANDIDATE_MODEL_SOURCE_SERVERLESS", + "CANDIDATE_MODEL_SOURCE_DEDICATED", and "CANDIDATE_MODEL_SOURCE_ROUTER". + "candidate_model_uuid": "str", # Optional. UUID of the candidate model to + evaluate. + "dataset_uuid": "str", # Optional. UUID of the dataset to use for + evaluation. + "eval_preset_uuid": "str", # Optional. + "judge_model_uuid": "str", # Optional. UUID of the judge model used to score + responses. + "metric_uuids": [ + "str" # Optional. UUIDs of metrics to evaluate (selected from + ListModelEvaluationMetrics). + ], + "name": "str", # Optional. + "preset_name": "str", # Optional. + "save_as_preset": bool, # Optional. .. role:: raw-html-m2r(raw) :format: + html If true, saves the inline config as a reusable preset"" + :raw-html-m2r:`
` Ignored when eval_preset_uuid is provided. + "source": "str", # Optional. Source of the run creation (api, sdk, cli). + "star_metric": { + "metric_uuid": "str", # Optional. + "name": "str", # Optional. + "success_threshold": 0.0, # Optional. The success threshold for the + star metric. This is a value that the metric must reach to be considered + successful. + "success_threshold_pct": 0 # Optional. The success threshold for the + star metric. This is a percentage value between 0 and 100. + } + } + + # response body for status code(s): 200 + response == { + "eval_run_uuid": "str" # Optional. UUID of the created evaluation run. + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + def create_model_evaluation_run( + self, + body: Optional[IO[bytes]] = None, + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """Create Model Evaluation Run. + + To create a model evaluation run, send a POST request to ``/v2/genai/model_evaluation_runs``. + + :param body: Default value is None. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # response body for status code(s): 200 + response == { + "eval_run_uuid": "str" # Optional. UUID of the created evaluation run. + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace + def create_model_evaluation_run( + self, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Create Model Evaluation Run. + + To create a model evaluation run, send a POST request to ``/v2/genai/model_evaluation_runs``. + + :param body: Is either a JSON type or a IO[bytes] type. Default value is None. + :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "candidate_inference_config": { + "max_tokens": 0, # Optional. Inference configuration for the + candidate model during evaluation. + "stop_token": "str", # Optional. Inference configuration for the + candidate model during evaluation. + "system_prompt": "str", # Optional. Inference configuration for the + candidate model during evaluation. + "temperature": 0.0 # Optional. Inference configuration for the + candidate model during evaluation. + }, + "candidate_model_name": "str", # Optional. Model slug used to call the + candidate model API. For dedicated inference, this is the model slug from the + deployment. 
For serverless, this should match the model's internal name. + "candidate_model_source": "CANDIDATE_MODEL_SOURCE_SERVERLESS", # Optional. + Default value is "CANDIDATE_MODEL_SOURCE_SERVERLESS". Whether inference runs + against the serverless platform, a dedicated deployment, or a model router. Known + values are: "CANDIDATE_MODEL_SOURCE_SERVERLESS", + "CANDIDATE_MODEL_SOURCE_DEDICATED", and "CANDIDATE_MODEL_SOURCE_ROUTER". + "candidate_model_uuid": "str", # Optional. UUID of the candidate model to + evaluate. + "dataset_uuid": "str", # Optional. UUID of the dataset to use for + evaluation. + "eval_preset_uuid": "str", # Optional. + "judge_model_uuid": "str", # Optional. UUID of the judge model used to score + responses. + "metric_uuids": [ + "str" # Optional. UUIDs of metrics to evaluate (selected from + ListModelEvaluationMetrics). + ], + "name": "str", # Optional. + "preset_name": "str", # Optional. + "save_as_preset": bool, # Optional. .. role:: raw-html-m2r(raw) :format: + html If true, saves the inline config as a reusable preset"" + :raw-html-m2r:`
` Ignored when eval_preset_uuid is provided. + "source": "str", # Optional. Source of the run creation (api, sdk, cli). + "star_metric": { + "metric_uuid": "str", # Optional. + "name": "str", # Optional. + "success_threshold": 0.0, # Optional. The success threshold for the + star metric. This is a value that the metric must reach to be considered + successful. + "success_threshold_pct": 0 # Optional. The success threshold for the + star metric. This is a percentage value between 0 and 100. + } + } + + # response body for status code(s): 200 + response == { + "eval_run_uuid": "str" # Optional. UUID of the created evaluation run. + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[JSON] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + if body is not None: + _json = body + else: + _json = None + + _request = build_genai_create_model_evaluation_run_request( + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if 
response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace + def get_model_evaluation_run( + self, + eval_run_uuid: str, + *, + page: Optional[int] = None, + per_page: Optional[int] = None, + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """Retrieve Model Evaluation Run. + + To retrieve a model evaluation run, send a GET request to + ``/v2/genai/model_evaluation_runs/{eval_run_uuid}``. + + :param eval_run_uuid: UUID of the evaluation run. Required. + :type eval_run_uuid: str + :keyword page: Page number for per-prompt results (defaults to 1). Default value is None. + :paramtype page: int + :keyword per_page: Number of per-prompt results per page (defaults to 50). Default value is + None. + :paramtype per_page: int + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "links": { + "pages": { + "first": "str", # Optional. First page. + "last": "str", # Optional. Last page. + "next": "str", # Optional. Next page. + "previous": "str" # Optional. Previous page. + } + }, + "meta": { + "page": 0, # Optional. The current page. + "pages": 0, # Optional. Total number of pages. + "total": 0 # Optional. Total amount of items over all pages. 
+ }, + "results": [ + { + "candidate_model_name": "str", # Optional. Paginated + per-prompt evaluation results. + "candidate_model_uuid": "str", # Optional. Paginated + per-prompt evaluation results. + "ground_truth": "str", # Optional. Paginated per-prompt + evaluation results. + "input": "str", # Optional. The input query sent to the + candidate model. + "metric_results": [ + { + "error_description": "str", # Optional. + Error description if the metric could not be calculated. + "metric_name": "str", # Optional. Metric + name. + "metric_value_type": + "METRIC_VALUE_TYPE_UNSPECIFIED", # Optional. Default value is + "METRIC_VALUE_TYPE_UNSPECIFIED". Known values are: + "METRIC_VALUE_TYPE_UNSPECIFIED", "METRIC_VALUE_TYPE_NUMBER", + "METRIC_VALUE_TYPE_STRING", and "METRIC_VALUE_TYPE_PERCENTAGE". + "number_value": 0.0, # Optional. The value + of the metric as a number. + "reasoning": "str", # Optional. Reasoning of + the metric result. + "string_value": "str" # Optional. The value + of the metric as a string. + } + ], + "output": "str" # Optional. The response from the candidate + model. + } + ], + "run": { + "candidate_inference_config": { + "max_tokens": 0, # Optional. Inference configuration for the + candidate model during evaluation. + "stop_token": "str", # Optional. Inference configuration for + the candidate model during evaluation. + "system_prompt": "str", # Optional. Inference configuration + for the candidate model during evaluation. + "temperature": 0.0 # Optional. Inference configuration for + the candidate model during evaluation. + }, + "candidate_model_name": "str", # Optional. Model Evaluation Run + Detail - full view returned when fetching a specific run. + "candidate_model_source": "CANDIDATE_MODEL_SOURCE_SERVERLESS", # + Optional. Default value is "CANDIDATE_MODEL_SOURCE_SERVERLESS". Whether + inference runs against the serverless platform, a dedicated deployment, or a + model router. 
Known values are: "CANDIDATE_MODEL_SOURCE_SERVERLESS", + "CANDIDATE_MODEL_SOURCE_DEDICATED", and "CANDIDATE_MODEL_SOURCE_ROUTER". + "candidate_model_uuid": "str", # Optional. Candidate model being + evaluated. + "completed_at": "2020-02-20 00:00:00", # Optional. Model Evaluation + Run Detail - full view returned when fetching a specific run. + "created_at": "2020-02-20 00:00:00", # Optional. Model Evaluation + Run Detail - full view returned when fetching a specific run. + "dataset_name": "str", # Optional. Model Evaluation Run Detail - + full view returned when fetching a specific run. + "dataset_uuid": "str", # Optional. Dataset used for the evaluation. + "error_description": "str", # Optional. Error description if the run + failed or partially succeeded. + "eval_preset_name": "str", # Optional. Model Evaluation Run Detail - + full view returned when fetching a specific run. + "eval_preset_uuid": "str", # Optional. Model Evaluation Run Detail - + full view returned when fetching a specific run. + "eval_run_uuid": "str", # Optional. UUID of the evaluation run. + "judge_model_name": "str", # Optional. Model Evaluation Run Detail - + full view returned when fetching a specific run. + "judge_model_uuid": "str", # Optional. Judge model used to score + responses. + "metrics": [ + { + "category": "METRIC_CATEGORY_UNSPECIFIED", # + Optional. Default value is "METRIC_CATEGORY_UNSPECIFIED". Known + values are: "METRIC_CATEGORY_UNSPECIFIED", + "METRIC_CATEGORY_CORRECTNESS", "METRIC_CATEGORY_USER_OUTCOMES", + "METRIC_CATEGORY_SAFETY_AND_SECURITY", + "METRIC_CATEGORY_CONTEXT_QUALITY", and "METRIC_CATEGORY_MODEL_FIT". + "description": "str", # Optional. Metrics selected + for this evaluation. + "evaluation_scope": "EVALUATION_SCOPE_UNSPECIFIED", + # Optional. Default value is "EVALUATION_SCOPE_UNSPECIFIED". Scope + that determines whether a metric belongs to agent evaluation or model + evaluation. 
For backwards compatibility, UNSPECIFIED defaults to + agent metrics only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". + "inverted": bool, # Optional. If true, the metric is + inverted, meaning that a lower value is better. + "is_metric_goal": bool, # Optional. Metrics selected + for this evaluation. + "metric_name": "str", # Optional. Metrics selected + for this evaluation. + "metric_rank": 0, # Optional. Metrics selected for + this evaluation. + "metric_type": "METRIC_TYPE_UNSPECIFIED", # + Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values + are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", and + "METRIC_TYPE_MODEL_SAFETY". + "metric_uuid": "str", # Optional. Metrics selected + for this evaluation. + "metric_value_type": "METRIC_VALUE_TYPE_UNSPECIFIED", + # Optional. Default value is "METRIC_VALUE_TYPE_UNSPECIFIED". Known + values are: "METRIC_VALUE_TYPE_UNSPECIFIED", + "METRIC_VALUE_TYPE_NUMBER", "METRIC_VALUE_TYPE_STRING", and + "METRIC_VALUE_TYPE_PERCENTAGE". + "range_max": 0.0, # Optional. The maximum value for + the metric. + "range_min": 0.0 # Optional. The minimum value for + the metric. + } + ], + "name": "str", # Optional. Name of the evaluation run. + "result_summary": { + "end_time": "2020-02-20 00:00:00", # Optional. Aggregated + result summary for a completed model evaluation run. + "metric_summaries": [ + { + "description": "str", # Optional. Per-metric + aggregated pass/fail statistics. + "fail_percent": 0.0, # Optional. Per-metric + aggregated pass/fail statistics. + "metric_name": "str", # Optional. Per-metric + aggregated pass/fail statistics. + "metric_uuid": "str", # Optional. Per-metric + aggregated pass/fail statistics. + "pass_percent": 0.0 # Optional. Per-metric + aggregated pass/fail statistics. + } + ], + "overall_score_percent": 0.0, # Optional. 
Aggregated result + summary for a completed model evaluation run. + "per_model_summaries": { + "summaries": [ + { + "metric_summaries": [ + { + "description": "str", + # Optional. Per-metric pass/fail for only this + model's prompts. + "fail_percent": 0.0, + # Optional. Per-metric pass/fail for only this + model's prompts. + "metric_name": "str", + # Optional. Per-metric pass/fail for only this + model's prompts. + "metric_uuid": "str", + # Optional. Per-metric pass/fail for only this + model's prompts. + "pass_percent": 0.0 + # Optional. Per-metric pass/fail for only this + model's prompts. + } + ], + "model_name": "str", # Optional. + Name/slug of the model (matches routed_model from results). + "performance_metrics": { + "candidate_latency": { + "avg_e2e_latency_ms": + 0.0, # Optional. Average end-to-end latency across + all invocations. + "max_e2e_latency_ms": + 0.0, # Optional. Maximum end-to-end latency + observed. + "min_e2e_latency_ms": + 0.0, # Optional. Minimum end-to-end latency + observed. + "p50_latency_ms": + 0.0, # Optional. P50 (median) latency. + "p90_latency_ms": + 0.0, # Optional. P90 latency. + "p95_latency_ms": 0.0 + # Optional. P95 latency. + }, + "token_usage": { + "total_candidate_input_tokens": "str", # Optional. + All performance metrics are for the candidate model + unless noted otherwise. + "total_candidate_output_tokens": "str", # Optional. + All performance metrics are for the candidate model + unless noted otherwise. + "total_candidate_tokens": "str", # Optional. All + performance metrics are for the candidate model + unless noted otherwise. + "total_judge_input_tokens": "str", # Optional. All + performance metrics are for the candidate model + unless noted otherwise. + "total_judge_output_tokens": "str", # Optional. All + performance metrics are for the candidate model + unless noted otherwise. + "total_judge_tokens": + "str" # Optional. All performance metrics are for + the candidate model unless noted otherwise. 
+ } + }, + "prompt_count": 0 # Optional. Number + of prompts routed to this model. + } + ] + }, + "performance_metrics": { + "candidate_latency": { + "avg_e2e_latency_ms": 0.0, # Optional. + Average end-to-end latency across all invocations. + "max_e2e_latency_ms": 0.0, # Optional. + Maximum end-to-end latency observed. + "min_e2e_latency_ms": 0.0, # Optional. + Minimum end-to-end latency observed. + "p50_latency_ms": 0.0, # Optional. P50 + (median) latency. + "p90_latency_ms": 0.0, # Optional. P90 + latency. + "p95_latency_ms": 0.0 # Optional. P95 + latency. + }, + "token_usage": { + "total_candidate_input_tokens": "str", # + Optional. All performance metrics are for the candidate model + unless noted otherwise. + "total_candidate_output_tokens": "str", # + Optional. All performance metrics are for the candidate model + unless noted otherwise. + "total_candidate_tokens": "str", # Optional. + All performance metrics are for the candidate model unless noted + otherwise. + "total_judge_input_tokens": "str", # + Optional. All performance metrics are for the candidate model + unless noted otherwise. + "total_judge_output_tokens": "str", # + Optional. All performance metrics are for the candidate model + unless noted otherwise. + "total_judge_tokens": "str" # Optional. All + performance metrics are for the candidate model unless noted + otherwise. + } + }, + "pricing": { + "currency": "str", # Optional. Currency code (e.g., + "USD"). + "judge_model_pricing": { + "input_cost": 0.0, # Optional. Cost of input + tokens. + "output_cost": 0.0, # Optional. Cost of + output tokens. + "total_cost": 0.0 # Optional. Total cost + (input + output). + }, + "per_candidate_model_pricing": [ + { + "model_name": "str", # Optional. + Model name (for display purposes). + "model_uuid": "str", # Optional. + Model UUID. + "pricing": { + "input_cost": 0.0, # + Optional. Cost of input tokens. + "output_cost": 0.0, # + Optional. Cost of output tokens. + "total_cost": 0.0 # + Optional. 
Total cost (input + output). + }, + "prompt_count": 0 # Optional. Number + of prompts/rows routed to this model. + } + ], + "total_cost": 0.0 # Optional. Total cost of the + evaluation run (all candidates + judge). + }, + "star_metric_summary": { + "metric_name": "str", # Optional. Star metric + summary with identifying details and threshold. + "metric_uuid": "str", # Optional. Star metric + summary with identifying details and threshold. + "threshold": 0.0 # Optional. Star metric summary + with identifying details and threshold. + }, + "start_time": "2020-02-20 00:00:00", # Optional. Run timing. + "total_duration_seconds": 0 # Optional. Total wall-clock + duration in seconds. + }, + "star_metric": { + "metric_uuid": "str", # Optional. Model Evaluation Run + Detail - full view returned when fetching a specific run. + "name": "str", # Optional. Model Evaluation Run Detail - + full view returned when fetching a specific run. + "success_threshold": 0.0, # Optional. The success threshold + for the star metric. This is a value that the metric must reach to be + considered successful. + "success_threshold_pct": 0 # Optional. The success threshold + for the star metric. This is a percentage value between 0 and 100. + }, + "started_at": "2020-02-20 00:00:00", # Optional. Model Evaluation + Run Detail - full view returned when fetching a specific run. + "status": "MODEL_EVALUATION_RUN_STATUS_UNSPECIFIED" # Optional. + Default value is "MODEL_EVALUATION_RUN_STATUS_UNSPECIFIED". Model Evaluation + Run Statuses. Known values are: "MODEL_EVALUATION_RUN_STATUS_UNSPECIFIED", + "MODEL_EVALUATION_RUN_QUEUED", "MODEL_EVALUATION_RUN_RUNNING_DATASET", + "MODEL_EVALUATION_RUN_EVALUATING_RESULTS", "MODEL_EVALUATION_RUN_CANCELLING", + "MODEL_EVALUATION_RUN_CANCELLED", "MODEL_EVALUATION_RUN_SUCCESSFUL", + "MODEL_EVALUATION_RUN_PARTIALLY_SUCCESSFUL", and + "MODEL_EVALUATION_RUN_FAILED". 
+ } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_genai_get_model_evaluation_run_request( + eval_run_uuid=eval_run_uuid, + page=page, + per_page=per_page, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + 
response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace + def get_model_evaluation_run_results_download_url( # pylint: disable=name-too-long + self, eval_run_uuid: str, **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Get Download URL for Model Evaluation Run Results. + + To get a presigned download URL for model evaluation run results (gzip-compressed JSON), send a + GET request to ``/v2/genai/model_evaluation_runs/{eval_run_uuid}/results/download_url``. + + :param eval_run_uuid: UUID of the evaluation run. Required. + :type eval_run_uuid: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "download_url": "str", # Optional. The presigned URL to download the + gzip-compressed JSON results file (.json.gz). + "expires_at": "2020-02-20 00:00:00" # Optional. The time the URL expires at. 
+ } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_genai_get_model_evaluation_run_results_download_url_request( + eval_run_uuid=eval_run_uuid, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + 
response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + @distributed_trace def list_models( self, @@ -251591,8 +265048,28 @@ def list_models( "url": "str", # Optional. Agreement Description. "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores for this + model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. Model capabilities (inference, + reasoning, vectorization, etc.). + ], + "context_window": "str", # Optional. Context window (maximum + tokens). "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "description": "str", # Optional. Model description. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities + supported by this endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "id": "str", # Optional. Human-readable model identifier. "is_foundational": bool, # Optional. True if it is a foundational model provided by do. @@ -251602,9 +265079,61 @@ def list_models( of model. 
"kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle status of + the model (internal, public-preview, active, deprecated, end_of_life). + "modalities": { + "input": [ + "str" # Optional. Input/output modalities. + ], + "output": [ + "str" # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Display name of the model. + "parameter_count": 0.0, # Optional. Parameter count in + billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. + "pricing": { + "input_price_per_million": 0.0, # Optional. Pricing + per million tokens (aligns with existing ModelPrice pattern). + "output_price_per_million": 0.0 # Optional. Pricing + per million tokens (aligns with existing ModelPrice pattern). + }, + "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. + Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: + "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and + "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning efforts for + this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model supports + extended thinking (Anthropic models). 
+ "type": "str", # Optional. Model type (chat, embedding, + image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been fully @@ -252989,8 +266518,27 @@ def list_openai_api_keys( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -253004,16 +266552,64 @@ def list_openai_api_keys( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. 
Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -253194,211 +266790,358 @@ def create_openai_api_key( "uuid": "str" # Optional. Agreement Description. }, - "created_at": "2020-02-20 00:00:00", # Optional. - Creation date / time. - "inference_name": "str", # Optional. Internally used - name. - "inference_version": "str", # Optional. Internally - used version. - "is_foundational": bool, # Optional. True if it is a - foundational model provided by do. - "kb_default_chunk_size": 0, # Optional. Default - chunking size limit to show in UI. - "kb_max_chunk_size": 0, # Optional. Maximum chunk - size limit of model. - "kb_min_chunk_size": 0, # Optional. Minimum chunking - size token limits if model supports KNOWLEDGEBASE usecase. - "metadata": {}, # Optional. Additional meta data. - "name": "str", # Optional. Name of the model. - "parent_uuid": "str", # Optional. Unique id of the - model, this model is based on. - "provider": "MODEL_PROVIDER_DIGITALOCEAN", # - Optional. 
Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known - values are: "MODEL_PROVIDER_DIGITALOCEAN", - "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". - "updated_at": "2020-02-20 00:00:00", # Optional. - Last modified. - "upload_complete": bool, # Optional. Model has been - fully uploaded. - "url": "str", # Optional. Download url. - "usecases": [ - "str" # Optional. Usecases of the model. + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. + "created_at": "2020-02-20 00:00:00", # Optional. + Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], + "inference_name": "str", # Optional. Internally used + name. + "inference_version": "str", # Optional. Internally + used version. + "is_foundational": bool, # Optional. True if it is a + foundational model provided by do. + "kb_default_chunk_size": 0, # Optional. Default + chunking size limit to show in UI. + "kb_max_chunk_size": 0, # Optional. Maximum chunk + size limit of model. + "kb_min_chunk_size": 0, # Optional. Minimum chunking + size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). + "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, + "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. + "parent_uuid": "str", # Optional. 
Unique id of the + model, this model is based on. + "provider": "MODEL_PROVIDER_DIGITALOCEAN", # + Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known + values are: "MODEL_PROVIDER_DIGITALOCEAN", + "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). + "updated_at": "2020-02-20 00:00:00", # Optional. + Last modified. + "upload_complete": bool, # Optional. Model has been + fully uploaded. + "url": "str", # Optional. Download url. + "usecases": [ + "str" # Optional. Usecases of the model. + ], + "uuid": "str", # Optional. Unique id. + "version": { + "major": 0, # Optional. Major version + number. + "minor": 0, # Optional. Minor version + number. + "patch": 0 # Optional. Patch version number. + } + } + ], + "name": "str", # Optional. Name. + "updated_at": "2020-02-20 00:00:00", # Optional. Key last updated + date. + "uuid": "str" # Optional. Uuid. 
+ } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + def create_openai_api_key( + self, + body: Optional[IO[bytes]] = None, + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """Create OpenAI API Key. + + To create an OpenAI API key, send a POST request to ``/v2/gen-ai/openai/keys``. + + :param body: Default value is None. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "api_key_info": { + "created_at": "2020-02-20 00:00:00", # Optional. Key creation date. + "created_by": "str", # Optional. Created by user id from DO. + "deleted_at": "2020-02-20 00:00:00", # Optional. Key deleted date. + "models": [ + { + "agreement": { + "description": "str", # Optional. Agreement + Description. + "name": "str", # Optional. Agreement + Description. + "url": "str", # Optional. Agreement + Description. + "uuid": "str" # Optional. Agreement + Description. + }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. 
tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. + "created_at": "2020-02-20 00:00:00", # Optional. + Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], + "inference_name": "str", # Optional. Internally used + name. + "inference_version": "str", # Optional. Internally + used version. + "is_foundational": bool, # Optional. True if it is a + foundational model provided by do. + "kb_default_chunk_size": 0, # Optional. Default + chunking size limit to show in UI. + "kb_max_chunk_size": 0, # Optional. Maximum chunk + size limit of model. + "kb_min_chunk_size": 0, # Optional. Minimum chunking + size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). + "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, + "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. + "parent_uuid": "str", # Optional. Unique id of the + model, this model is based on. + "provider": "MODEL_PROVIDER_DIGITALOCEAN", # + Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known + values are: "MODEL_PROVIDER_DIGITALOCEAN", + "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). 
+ "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). + "updated_at": "2020-02-20 00:00:00", # Optional. + Last modified. + "upload_complete": bool, # Optional. Model has been + fully uploaded. + "url": "str", # Optional. Download url. + "usecases": [ + "str" # Optional. Usecases of the model. + ], + "uuid": "str", # Optional. Unique id. + "version": { + "major": 0, # Optional. Major version + number. + "minor": 0, # Optional. Minor version + number. + "patch": 0 # Optional. Patch version number. + } + } + ], + "name": "str", # Optional. Name. + "updated_at": "2020-02-20 00:00:00", # Optional. Key last updated + date. + "uuid": "str" # Optional. Uuid. + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + + @distributed_trace + def create_openai_api_key( + self, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Create OpenAI API Key. + + To create an OpenAI API key, send a POST request to ``/v2/gen-ai/openai/keys``. + + :param body: Is either a JSON type or a IO[bytes] type. Default value is None. + :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "api_key": "str", # Optional. OpenAI API key. + "name": "str" # Optional. Name of the key. + } + + # response body for status code(s): 200 + response == { + "api_key_info": { + "created_at": "2020-02-20 00:00:00", # Optional. Key creation date. + "created_by": "str", # Optional. Created by user id from DO. + "deleted_at": "2020-02-20 00:00:00", # Optional. Key deleted date. + "models": [ + { + "agreement": { + "description": "str", # Optional. Agreement + Description. + "name": "str", # Optional. Agreement + Description. + "url": "str", # Optional. Agreement + Description. + "uuid": "str" # Optional. Agreement + Description. + }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). ], - "uuid": "str", # Optional. Unique id. - "version": { - "major": 0, # Optional. Major version - number. - "minor": 0, # Optional. Minor version - number. - "patch": 0 # Optional. Patch version number. - } - } - ], - "name": "str", # Optional. Name. - "updated_at": "2020-02-20 00:00:00", # Optional. Key last updated - date. - "uuid": "str" # Optional. Uuid. - } - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. 
For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @overload - def create_openai_api_key( - self, - body: Optional[IO[bytes]] = None, - *, - content_type: str = "application/json", - **kwargs: Any, - ) -> JSON: - # pylint: disable=line-too-long - """Create OpenAI API Key. - - To create an OpenAI API key, send a POST request to ``/v2/gen-ai/openai/keys``. - - :param body: Default value is None. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "api_key_info": { - "created_at": "2020-02-20 00:00:00", # Optional. Key creation date. - "created_by": "str", # Optional. Created by user id from DO. - "deleted_at": "2020-02-20 00:00:00", # Optional. Key deleted date. - "models": [ - { - "agreement": { - "description": "str", # Optional. Agreement - Description. - "name": "str", # Optional. Agreement - Description. - "url": "str", # Optional. Agreement - Description. - "uuid": "str" # Optional. Agreement - Description. - }, + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. - "inference_name": "str", # Optional. Internally used - name. - "inference_version": "str", # Optional. Internally - used version. - "is_foundational": bool, # Optional. 
True if it is a - foundational model provided by do. - "kb_default_chunk_size": 0, # Optional. Default - chunking size limit to show in UI. - "kb_max_chunk_size": 0, # Optional. Maximum chunk - size limit of model. - "kb_min_chunk_size": 0, # Optional. Minimum chunking - size token limits if model supports KNOWLEDGEBASE usecase. - "metadata": {}, # Optional. Additional meta data. - "name": "str", # Optional. Name of the model. - "parent_uuid": "str", # Optional. Unique id of the - model, this model is based on. - "provider": "MODEL_PROVIDER_DIGITALOCEAN", # - Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known - values are: "MODEL_PROVIDER_DIGITALOCEAN", - "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". - "updated_at": "2020-02-20 00:00:00", # Optional. - Last modified. - "upload_complete": bool, # Optional. Model has been - fully uploaded. - "url": "str", # Optional. Download url. - "usecases": [ - "str" # Optional. Usecases of the model. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } ], - "uuid": "str", # Optional. Unique id. - "version": { - "major": 0, # Optional. Major version - number. - "minor": 0, # Optional. Minor version - number. - "patch": 0 # Optional. Patch version number. - } - } - ], - "name": "str", # Optional. Name. - "updated_at": "2020-02-20 00:00:00", # Optional. Key last updated - date. - "uuid": "str" # Optional. Uuid. - } - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. 
- "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @distributed_trace - def create_openai_api_key( - self, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any - ) -> JSON: - # pylint: disable=line-too-long - """Create OpenAI API Key. - - To create an OpenAI API key, send a POST request to ``/v2/gen-ai/openai/keys``. - - :param body: Is either a JSON type or a IO[bytes] type. Default value is None. - :type body: JSON or IO[bytes] - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "api_key": "str", # Optional. OpenAI API key. - "name": "str" # Optional. Name of the key. - } - - # response body for status code(s): 200 - response == { - "api_key_info": { - "created_at": "2020-02-20 00:00:00", # Optional. Key creation date. - "created_by": "str", # Optional. Created by user id from DO. - "deleted_at": "2020-02-20 00:00:00", # Optional. Key deleted date. - "models": [ - { - "agreement": { - "description": "str", # Optional. Agreement - Description. - "name": "str", # Optional. Agreement - Description. - "url": "str", # Optional. Agreement - Description. - "uuid": "str" # Optional. Agreement - Description. - }, - "created_at": "2020-02-20 00:00:00", # Optional. - Creation date / time. "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -253411,14 +267154,59 @@ def create_openai_api_key( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. 
Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -253584,8 +267372,27 @@ def get_openai_api_key(self, api_key_uuid: str, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. 
High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -253598,14 +267405,59 @@ def get_openai_api_key(self, api_key_uuid: str, **kwargs: Any) -> JSON: size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). 
+ "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -253773,8 +267625,27 @@ def update_openai_api_key( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -253787,14 +267658,59 @@ def update_openai_api_key( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. 
+ ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -253878,8 +267794,27 @@ def update_openai_api_key( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. 
+ "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -253892,14 +267827,59 @@ def update_openai_api_key( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. 
Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -253985,8 +267965,27 @@ def update_openai_api_key( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -253999,14 +267998,59 @@ def update_openai_api_key( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. 
"provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -254173,8 +268217,27 @@ def delete_openai_api_key(self, api_key_uuid: str, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). 
+ ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -254187,14 +268250,59 @@ def delete_openai_api_key(self, api_key_uuid: str, **kwargs: Any) -> JSON: size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". 
+ } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -254592,6 +268700,12 @@ def list_agents_by_openai_key( "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -254619,6 +268733,24 @@ def list_agents_by_openai_key( stream. }, "max_tokens": 0, # Optional. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of + allowed tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional + additional headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of + the MCP server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -254630,8 +268762,27 @@ def list_agents_by_openai_key( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. 
The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -254644,14 +268795,59 @@ def list_agents_by_openai_key( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. 
Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -254689,8 +268885,27 @@ def list_agents_by_openai_key( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. + Benchmark scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. + Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # + Optional. Capabilities supported by this endpoint + (e.g. input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # @@ -254704,10 +268919,25 @@ def list_agents_by_openai_key( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -254715,6 +268945,42 @@ def list_agents_by_openai_key( "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". 
+ "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": + "str", # Optional. String default value (for + type="dropdown", e.g. "medium"). + "default_value": 0.0, + # Optional. Numeric default value (for + type="number"). + "max": 0.0, # + Optional. Maximum allowed value (for type="number"). + "min": 0.0, # + Optional. Minimum allowed value (for type="number"). + "name": "str", # + Optional. Setting key name (e.g. "max_tokens", + "temperature", "resolution"). + "options": [ + "str" # + Optional. Allowed values for dropdown selections + (for type="dropdown"). + ], + "step": 0.0, # + Optional. Step increment for numeric settings (for + type="number"). + "type": "str" # + Optional. Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. + Whether this model supports extended thinking (Anthropic + models). + "type": "str", # Optional. Model + type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. @@ -254745,6 +269011,45 @@ def list_agents_by_openai_key( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level + fallback models. + ], + "policies": [ + { + "custom_task": { + "description": "str", + # Optional. Short task description. + "name": "str" # + Optional. Task name. + }, + "models": [ + "str" # Optional. + Models assigned to the task. + ], + "selection_policy": { + "prefer": "str" # + Optional. One of: none, cheapest, fastest. + }, + "task_slug": "str" # + Optional. Task slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. + Creation date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the + router. 
+ ], + "updated_at": "2020-02-20 00:00:00", # Optional. + Last modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -254765,8 +269070,27 @@ def list_agents_by_openai_key( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. + Benchmark scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. + Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # + Optional. Capabilities supported by this endpoint + (e.g. input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # @@ -254780,10 +269104,25 @@ def list_agents_by_openai_key( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -254791,6 +269130,42 @@ def list_agents_by_openai_key( "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. 
Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": + "str", # Optional. String default value (for + type="dropdown", e.g. "medium"). + "default_value": 0.0, + # Optional. Numeric default value (for + type="number"). + "max": 0.0, # + Optional. Maximum allowed value (for type="number"). + "min": 0.0, # + Optional. Minimum allowed value (for type="number"). + "name": "str", # + Optional. Setting key name (e.g. "max_tokens", + "temperature", "resolution"). + "options": [ + "str" # + Optional. Allowed values for dropdown selections + (for type="dropdown"). + ], + "step": 0.0, # + Optional. Step increment for numeric settings (for + type="number"). + "type": "str" # + Optional. Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. + Whether this model supports extended thinking (Anthropic + models). + "type": "str", # Optional. Model + type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. @@ -254824,6 +269199,8 @@ def list_agents_by_openai_key( "project_id": "str", # Optional. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort + for the agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: @@ -254971,6 +269348,12 @@ def list_agents_by_openai_key( List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. + Whether reranking is enabled for retrieval. + "model": "str" # Optional. + Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -254998,8 +269381,27 @@ def list_agents_by_openai_key( "uuid": "str" # Optional. 
Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -255013,16 +269415,64 @@ def list_agents_by_openai_key( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. 
+ Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -255066,6 +269516,8 @@ def list_agents_by_openai_key( agent template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token + budget for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. @@ -255108,6 +269560,13 @@ def list_agents_by_openai_key( 00:00:00", # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default + value is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values + are: "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # @@ -255138,6 +269597,16 @@ def list_agents_by_openai_key( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent + evaluation or model evaluation. 
For backwards + compatibility, UNSPECIFIED defaults to agent metrics + only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", + "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -255151,8 +269620,10 @@ def list_agents_by_openai_key( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", - "METRIC_TYPE_GENERAL_QUALITY", and - "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_GENERAL_QUALITY", + "METRIC_TYPE_RAG_AND_TOOL", + "METRIC_TYPE_MODEL_QUALITY", and + "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -256320,6 +270791,12 @@ def list_workspaces(self, **kwargs: Any) -> JSON: Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # + Optional. Whether reranking is enabled for retrieval. + "model": "str" # + Optional. Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -256347,6 +270824,27 @@ def list_workspaces(self, **kwargs: Any) -> JSON: Name of the log stream. }, "max_tokens": 0, # Optional. Agents. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. + Optional list of allowed tool names to expose from + this server. + ], + "authorization": "str", # + Optional. Optional authorization header value for the MCP + server. + "headers": { + "str": "str" # + Optional. Optional additional headers to send to the + MCP server. + }, + "server_label": "str", # + Optional. A label identifying this MCP server. + "server_url": "str" # + Optional. The URL of the MCP server. + } + ], "model": { "agreement": { "description": "str", # @@ -256358,8 +270856,27 @@ def list_workspaces(self, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. 
+ Benchmark scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. + Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # + Optional. Capabilities supported by this endpoint + (e.g. input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # @@ -256373,10 +270890,25 @@ def list_workspaces(self, **kwargs: Any) -> JSON: "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -256384,6 +270916,42 @@ def list_workspaces(self, **kwargs: Any) -> JSON: "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": + "str", # Optional. String default value (for + type="dropdown", e.g. "medium"). + "default_value": 0.0, + # Optional. Numeric default value (for + type="number"). + "max": 0.0, # + Optional. Maximum allowed value (for type="number"). + "min": 0.0, # + Optional. 
Minimum allowed value (for type="number"). + "name": "str", # + Optional. Setting key name (e.g. "max_tokens", + "temperature", "resolution"). + "options": [ + "str" # + Optional. Allowed values for dropdown selections + (for type="dropdown"). + ], + "step": 0.0, # + Optional. Step increment for numeric settings (for + type="number"). + "type": "str" # + Optional. Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. + Whether this model supports extended thinking (Anthropic + models). + "type": "str", # Optional. Model + type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. @@ -256426,9 +270994,30 @@ def list_workspaces(self, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": + {}, # Optional. Benchmark scores for this model, + stored as arbitrary JSON. + "capabilities": [ + "str" # + Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": + "str", # Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities supported + by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": @@ -256443,10 +271032,26 @@ def list_workspaces(self, **kwargs: Any) -> JSON: "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": + "str", # Optional. Lifecycle status of the model + (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" + # Optional. Input/output modalities. 
+ ], + "output": [ + "str" + # Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": + 0.0, # Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. @@ -256456,6 +271061,43 @@ def list_workspaces(self, **kwargs: Any) -> JSON: are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": + [ + "str" # + Optional. Available reasoning efforts for this + model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended + thinking (Anthropic models). + "type": "str", # + Optional. Model type (chat, embedding, image, + reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": @@ -256488,6 +271130,46 @@ def list_workspaces(self, **kwargs: Any) -> JSON: "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. + Router-level fallback models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # Optional. Short task + description. + "name": "str" # Optional. Task name. + }, + "models": [ + "str" + # Optional. 
Models assigned to the task. + ], + "selection_policy": { + "prefer": "str" # Optional. One of: none, + cheapest, fastest. + }, + "task_slug": + "str" # Optional. Task slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", + # Optional. Creation date / time. + "description": "str", # Optional. + Description. + "name": "str", # Optional. Name of + the model router. + "regions": [ + "str" # Optional. Target + regions for the router. + ], + "updated_at": "2020-02-20 00:00:00", + # Optional. Last modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", @@ -256508,9 +271190,30 @@ def list_workspaces(self, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": + {}, # Optional. Benchmark scores for this model, + stored as arbitrary JSON. + "capabilities": [ + "str" # + Optional. High-level capabilities (e.g. + tool_calling, vision, streaming). + ], + "context_window": + "str", # Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. Capabilities supported + by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The endpoint + path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": @@ -256525,10 +271228,26 @@ def list_workspaces(self, **kwargs: Any) -> JSON: "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": + "str", # Optional. Lifecycle status of the model + (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" + # Optional. Input/output modalities. + ], + "output": [ + "str" + # Optional. Input/output modalities. 
+ ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": + 0.0, # Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. @@ -256538,6 +271257,43 @@ def list_workspaces(self, **kwargs: Any) -> JSON: are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": + [ + "str" # + Optional. Available reasoning efforts for this + model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, # Optional. Maximum allowed + value (for type="number"). + "min": 0.0, # Optional. Minimum allowed + value (for type="number"). + "name": "str", # Optional. Setting key name + (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. Allowed values for + dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. Step increment for + numeric settings (for type="number"). + "type": "str" # Optional. Setting value + type: "number" or "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended + thinking (Anthropic models). + "type": "str", # + Optional. Model type (chat, embedding, image, + reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": @@ -256571,6 +271327,8 @@ def list_workspaces(self, **kwargs: Any) -> JSON: "project_id": "str", # Optional. Agents. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The + reasoning effort for the agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is @@ -256737,6 +271495,13 @@ def list_workspaces(self, **kwargs: Any) -> JSON: the agent template. 
"region": "str", # Optional. Region code. + "reranking_config": { + "enabled": + bool, # Optional. Whether reranking is enabled + for retrieval. + "model": + "str" # Optional. Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -256764,8 +271529,31 @@ def list_workspaces(self, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -256780,10 +271568,25 @@ def list_workspaces(self, **kwargs: Any) -> JSON: "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -256791,6 +271594,44 @@ def list_workspaces(self, **kwargs: Any) -> JSON: is "MODEL_PROVIDER_DIGITALOCEAN". 
Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -256837,6 +271678,9 @@ def list_workspaces(self, **kwargs: Any) -> JSON: # Optional. The agent template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The + thinking token budget for Anthropic extended thinking (0 = + disabled). "top_p": 0.0, # Optional. Agents. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. @@ -256879,6 +271723,13 @@ def list_workspaces(self, **kwargs: Any) -> JSON: # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value + is "EVALUATION_DATASET_TYPE_UNKNOWN". 
Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The @@ -256909,6 +271760,15 @@ def list_workspaces(self, **kwargs: Any) -> JSON: "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent evaluation + or model evaluation. For backwards compatibility, + UNSPECIFIED defaults to agent metrics only in list + operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", + and "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -256922,7 +271782,8 @@ def list_workspaces(self, **kwargs: Any) -> JSON: "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", - and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", + and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -257365,6 +272226,12 @@ def create_workspace( Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. + Whether reranking is enabled for retrieval. + "model": "str" # Optional. + Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -257392,6 +272259,24 @@ def create_workspace( the log stream. }, "max_tokens": 0, # Optional. Agents. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional + list of allowed tool names to expose from this server. + ], + "authorization": "str", # Optional. 
+ Optional authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. + Optional additional headers to send to the MCP server. + }, + "server_label": "str", # Optional. A + label identifying this MCP server. + "server_url": "str" # Optional. The + URL of the MCP server. + } + ], "model": { "agreement": { "description": "str", # Optional. @@ -257403,8 +272288,27 @@ def create_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -257418,16 +272322,64 @@ def create_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". 
Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -257468,8 +272420,31 @@ def create_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. 
"inference_version": "str", @@ -257484,10 +272459,25 @@ def create_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -257495,6 +272485,44 @@ def create_workspace( is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. 
Last modified. "upload_complete": bool, # @@ -257525,6 +272553,48 @@ def create_workspace( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. + Router-level fallback models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # Optional. Short task + description. + "name": "str" + # Optional. Task name. + }, + "models": [ + "str" # + Optional. Models assigned to the task. + ], + "selection_policy": { + "prefer": + "str" # Optional. One of: none, cheapest, + fastest. + }, + "task_slug": "str" # + Optional. Task slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # + Optional. Creation date / time. + "description": "str", # Optional. + Description. + "name": "str", # Optional. Name of the model + router. + "regions": [ + "str" # Optional. Target regions for + the router. + ], + "updated_at": "2020-02-20 00:00:00", # + Optional. Last modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # @@ -257545,8 +272615,31 @@ def create_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -257561,10 +272654,25 @@ def create_workspace( "kb_min_chunk_size": 0, # Optional. 
Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -257572,6 +272680,44 @@ def create_workspace( is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -257605,6 +272751,8 @@ def create_workspace( "project_id": "str", # Optional. 
Agents. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning + effort for the agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * @@ -257763,6 +272911,12 @@ def create_workspace( agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # + Optional. Whether reranking is enabled for retrieval. + "model": "str" # + Optional. Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -257790,8 +272944,27 @@ def create_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. + Benchmark scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. + Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # + Optional. Capabilities supported by this endpoint + (e.g. input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # @@ -257805,10 +272978,25 @@ def create_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. 
+ "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -257816,6 +273004,42 @@ def create_workspace( "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": + "str", # Optional. String default value (for + type="dropdown", e.g. "medium"). + "default_value": 0.0, + # Optional. Numeric default value (for + type="number"). + "max": 0.0, # + Optional. Maximum allowed value (for type="number"). + "min": 0.0, # + Optional. Minimum allowed value (for type="number"). + "name": "str", # + Optional. Setting key name (e.g. "max_tokens", + "temperature", "resolution"). + "options": [ + "str" # + Optional. Allowed values for dropdown selections + (for type="dropdown"). + ], + "step": 0.0, # + Optional. Step increment for numeric settings (for + type="number"). + "type": "str" # + Optional. Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. + Whether this model supports extended thinking (Anthropic + models). + "type": "str", # Optional. Model + type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. @@ -257862,6 +273086,8 @@ def create_workspace( Optional. The agent template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking + token budget for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. Agents. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. @@ -257901,6 +273127,12 @@ def create_workspace( Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. 
+ "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value is + "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The size of @@ -257927,6 +273159,14 @@ def create_workspace( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default value is + "EVALUATION_SCOPE_UNSPECIFIED". Scope that determines whether + a metric belongs to agent evaluation or model evaluation. For + backwards compatibility, UNSPECIFIED defaults to agent + metrics only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -257939,8 +273179,9 @@ def create_workspace( "metric_type": "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: - "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", and - "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", and + "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -258293,6 +273534,12 @@ def create_workspace( Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. + Whether reranking is enabled for retrieval. + "model": "str" # Optional. + Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -258320,6 +273567,24 @@ def create_workspace( the log stream. }, "max_tokens": 0, # Optional. Agents. 
+ "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional + list of allowed tool names to expose from this server. + ], + "authorization": "str", # Optional. + Optional authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. + Optional additional headers to send to the MCP server. + }, + "server_label": "str", # Optional. A + label identifying this MCP server. + "server_url": "str" # Optional. The + URL of the MCP server. + } + ], "model": { "agreement": { "description": "str", # Optional. @@ -258331,8 +273596,27 @@ def create_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -258346,16 +273630,64 @@ def create_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. 
Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -258396,8 +273728,31 @@ def create_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). 
+ ], + "endpoint": + "str" # Optional. The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -258412,10 +273767,25 @@ def create_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -258423,6 +273793,44 @@ def create_workspace( is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. 
Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -258453,6 +273861,48 @@ def create_workspace( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. + Router-level fallback models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # Optional. Short task + description. + "name": "str" + # Optional. Task name. + }, + "models": [ + "str" # + Optional. Models assigned to the task. + ], + "selection_policy": { + "prefer": + "str" # Optional. One of: none, cheapest, + fastest. + }, + "task_slug": "str" # + Optional. Task slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # + Optional. Creation date / time. + "description": "str", # Optional. + Description. + "name": "str", # Optional. Name of the model + router. + "regions": [ + "str" # Optional. Target regions for + the router. + ], + "updated_at": "2020-02-20 00:00:00", # + Optional. Last modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # @@ -258473,8 +273923,31 @@ def create_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. The endpoint path (e.g. 
+ /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -258489,10 +273962,25 @@ def create_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -258500,6 +273988,44 @@ def create_workspace( is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. 
+ Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -258533,6 +274059,8 @@ def create_workspace( "project_id": "str", # Optional. Agents. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning + effort for the agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * @@ -258691,6 +274219,12 @@ def create_workspace( agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # + Optional. Whether reranking is enabled for retrieval. + "model": "str" # + Optional. Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -258718,8 +274252,27 @@ def create_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. + Benchmark scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. + Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # + Optional. Capabilities supported by this endpoint + (e.g. input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # @@ -258733,10 +274286,25 @@ def create_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). 
"metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -258744,6 +274312,42 @@ def create_workspace( "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": + "str", # Optional. String default value (for + type="dropdown", e.g. "medium"). + "default_value": 0.0, + # Optional. Numeric default value (for + type="number"). + "max": 0.0, # + Optional. Maximum allowed value (for type="number"). + "min": 0.0, # + Optional. Minimum allowed value (for type="number"). + "name": "str", # + Optional. Setting key name (e.g. "max_tokens", + "temperature", "resolution"). + "options": [ + "str" # + Optional. Allowed values for dropdown selections + (for type="dropdown"). + ], + "step": 0.0, # + Optional. Step increment for numeric settings (for + type="number"). + "type": "str" # + Optional. Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. + Whether this model supports extended thinking (Anthropic + models). + "type": "str", # Optional. Model + type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. @@ -258790,6 +274394,8 @@ def create_workspace( Optional. The agent template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking + token budget for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. Agents. 
"updated_at": "2020-02-20 00:00:00", # Optional. Last modified. @@ -258829,6 +274435,12 @@ def create_workspace( Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value is + "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The size of @@ -258855,6 +274467,14 @@ def create_workspace( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default value is + "EVALUATION_SCOPE_UNSPECIFIED". Scope that determines whether + a metric belongs to agent evaluation or model evaluation. For + backwards compatibility, UNSPECIFIED defaults to agent + metrics only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -258867,8 +274487,9 @@ def create_workspace( "metric_type": "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: - "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", and - "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", and + "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -259223,6 +274844,12 @@ def create_workspace( Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. + Whether reranking is enabled for retrieval. + "model": "str" # Optional. + Reranker model internal name. 
+ }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -259250,6 +274877,24 @@ def create_workspace( the log stream. }, "max_tokens": 0, # Optional. Agents. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional + list of allowed tool names to expose from this server. + ], + "authorization": "str", # Optional. + Optional authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. + Optional additional headers to send to the MCP server. + }, + "server_label": "str", # Optional. A + label identifying this MCP server. + "server_url": "str" # Optional. The + URL of the MCP server. + } + ], "model": { "agreement": { "description": "str", # Optional. @@ -259261,8 +274906,27 @@ def create_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -259276,16 +274940,64 @@ def create_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. 
+ Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -259326,8 +275038,31 @@ def create_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. 
Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -259342,10 +275077,25 @@ def create_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -259353,6 +275103,44 @@ def create_workspace( is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). 
+ "type": "str" + # Optional. Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -259383,6 +275171,48 @@ def create_workspace( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. + Router-level fallback models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # Optional. Short task + description. + "name": "str" + # Optional. Task name. + }, + "models": [ + "str" # + Optional. Models assigned to the task. + ], + "selection_policy": { + "prefer": + "str" # Optional. One of: none, cheapest, + fastest. + }, + "task_slug": "str" # + Optional. Task slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # + Optional. Creation date / time. + "description": "str", # Optional. + Description. + "name": "str", # Optional. Name of the model + router. + "regions": [ + "str" # Optional. Target regions for + the router. + ], + "updated_at": "2020-02-20 00:00:00", # + Optional. Last modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # @@ -259403,8 +275233,31 @@ def create_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. 
Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -259419,10 +275272,25 @@ def create_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -259430,6 +275298,44 @@ def create_workspace( is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. Setting value type: "number" or + "dropdown". 
+ } + ], + "thinking": bool, # + Optional. Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -259463,6 +275369,8 @@ def create_workspace( "project_id": "str", # Optional. Agents. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning + effort for the agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * @@ -259621,6 +275529,12 @@ def create_workspace( agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # + Optional. Whether reranking is enabled for retrieval. + "model": "str" # + Optional. Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -259648,8 +275562,27 @@ def create_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. + Benchmark scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. + Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # + Optional. Capabilities supported by this endpoint + (e.g. input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # @@ -259663,10 +275596,25 @@ def create_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. 
+ "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -259674,6 +275622,42 @@ def create_workspace( "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": + "str", # Optional. String default value (for + type="dropdown", e.g. "medium"). + "default_value": 0.0, + # Optional. Numeric default value (for + type="number"). + "max": 0.0, # + Optional. Maximum allowed value (for type="number"). + "min": 0.0, # + Optional. Minimum allowed value (for type="number"). + "name": "str", # + Optional. Setting key name (e.g. "max_tokens", + "temperature", "resolution"). + "options": [ + "str" # + Optional. Allowed values for dropdown selections + (for type="dropdown"). + ], + "step": 0.0, # + Optional. Step increment for numeric settings (for + type="number"). + "type": "str" # + Optional. Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. + Whether this model supports extended thinking (Anthropic + models). + "type": "str", # Optional. Model + type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. @@ -259720,6 +275704,8 @@ def create_workspace( Optional. The agent template's last updated date. "uuid": "str" # Optional. Unique id. 
}, + "thinking_token_budget": 0, # Optional. The thinking + token budget for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. Agents. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. @@ -259759,6 +275745,12 @@ def create_workspace( Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value is + "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The size of @@ -259785,6 +275777,14 @@ def create_workspace( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default value is + "EVALUATION_SCOPE_UNSPECIFIED". Scope that determines whether + a metric belongs to agent evaluation or model evaluation. For + backwards compatibility, UNSPECIFIED defaults to agent + metrics only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -259797,8 +275797,9 @@ def create_workspace( "metric_type": "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: - "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", and - "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", and + "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -260235,6 +276236,12 @@ def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: Knowledge bases. "region": "str", # Optional. 
Region code. + "reranking_config": { + "enabled": bool, # Optional. + Whether reranking is enabled for retrieval. + "model": "str" # Optional. + Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -260262,6 +276269,24 @@ def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: the log stream. }, "max_tokens": 0, # Optional. Agents. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional + list of allowed tool names to expose from this server. + ], + "authorization": "str", # Optional. + Optional authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. + Optional additional headers to send to the MCP server. + }, + "server_label": "str", # Optional. A + label identifying this MCP server. + "server_url": "str" # Optional. The + URL of the MCP server. + } + ], "model": { "agreement": { "description": "str", # Optional. @@ -260273,8 +276298,27 @@ def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -260288,16 +276332,64 @@ def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. 
+ "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -260338,8 +276430,31 @@ def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. 
}, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -260354,10 +276469,25 @@ def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -260365,6 +276495,44 @@ def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). 
+ "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -260395,6 +276563,48 @@ def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. + Router-level fallback models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # Optional. Short task + description. + "name": "str" + # Optional. Task name. + }, + "models": [ + "str" # + Optional. Models assigned to the task. + ], + "selection_policy": { + "prefer": + "str" # Optional. One of: none, cheapest, + fastest. + }, + "task_slug": "str" # + Optional. Task slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # + Optional. Creation date / time. + "description": "str", # Optional. + Description. + "name": "str", # Optional. Name of the model + router. + "regions": [ + "str" # Optional. Target regions for + the router. + ], + "updated_at": "2020-02-20 00:00:00", # + Optional. Last modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. 
"openai_api_key": { "created_at": "2020-02-20 00:00:00", # @@ -260415,8 +276625,31 @@ def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -260431,10 +276664,25 @@ def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -260442,6 +276690,44 @@ def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. 
+ ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -260475,6 +276761,8 @@ def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: "project_id": "str", # Optional. Agents. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning + effort for the agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * @@ -260633,6 +276921,12 @@ def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # + Optional. Whether reranking is enabled for retrieval. + "model": "str" # + Optional. Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -260660,8 +276954,27 @@ def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: "uuid": "str" # Optional. 
Agreement Description. }, + "benchmark_score": {}, # Optional. + Benchmark scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. + Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # + Optional. Capabilities supported by this endpoint + (e.g. input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # @@ -260675,10 +276988,25 @@ def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -260686,6 +277014,42 @@ def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": + "str", # Optional. String default value (for + type="dropdown", e.g. "medium"). + "default_value": 0.0, + # Optional. Numeric default value (for + type="number"). 
+ "max": 0.0, # + Optional. Maximum allowed value (for type="number"). + "min": 0.0, # + Optional. Minimum allowed value (for type="number"). + "name": "str", # + Optional. Setting key name (e.g. "max_tokens", + "temperature", "resolution"). + "options": [ + "str" # + Optional. Allowed values for dropdown selections + (for type="dropdown"). + ], + "step": 0.0, # + Optional. Step increment for numeric settings (for + type="number"). + "type": "str" # + Optional. Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. + Whether this model supports extended thinking (Anthropic + models). + "type": "str", # Optional. Model + type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. @@ -260732,6 +277096,8 @@ def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: Optional. The agent template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking + token budget for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. Agents. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. @@ -260771,6 +277137,12 @@ def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value is + "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The size of @@ -260797,6 +277169,14 @@ def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. 
+ "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default value is + "EVALUATION_SCOPE_UNSPECIFIED". Scope that determines whether + a metric belongs to agent evaluation or model evaluation. For + backwards compatibility, UNSPECIFIED defaults to agent + metrics only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -260809,8 +277189,9 @@ def get_workspace(self, workspace_uuid: str, **kwargs: Any) -> JSON: "metric_type": "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: - "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", and - "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", and + "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -261250,6 +277631,12 @@ def update_workspace( Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. + Whether reranking is enabled for retrieval. + "model": "str" # Optional. + Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -261277,6 +277664,24 @@ def update_workspace( the log stream. }, "max_tokens": 0, # Optional. Agents. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional + list of allowed tool names to expose from this server. + ], + "authorization": "str", # Optional. + Optional authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. + Optional additional headers to send to the MCP server. + }, + "server_label": "str", # Optional. A + label identifying this MCP server. + "server_url": "str" # Optional. The + URL of the MCP server. 
+ } + ], "model": { "agreement": { "description": "str", # Optional. @@ -261288,8 +277693,27 @@ def update_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -261303,16 +277727,64 @@ def update_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). 
+ "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -261353,8 +277825,31 @@ def update_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -261369,10 +277864,25 @@ def update_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). 
"metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -261380,6 +277890,44 @@ def update_workspace( is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -261410,6 +277958,48 @@ def update_workspace( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. + Router-level fallback models. 
+ ], + "policies": [ + { + "custom_task": { + "description": "str", # Optional. Short task + description. + "name": "str" + # Optional. Task name. + }, + "models": [ + "str" # + Optional. Models assigned to the task. + ], + "selection_policy": { + "prefer": + "str" # Optional. One of: none, cheapest, + fastest. + }, + "task_slug": "str" # + Optional. Task slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # + Optional. Creation date / time. + "description": "str", # Optional. + Description. + "name": "str", # Optional. Name of the model + router. + "regions": [ + "str" # Optional. Target regions for + the router. + ], + "updated_at": "2020-02-20 00:00:00", # + Optional. Last modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # @@ -261430,8 +278020,31 @@ def update_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -261446,10 +278059,25 @@ def update_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. 
+ "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -261457,6 +278085,44 @@ def update_workspace( is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -261490,6 +278156,8 @@ def update_workspace( "project_id": "str", # Optional. Agents. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning + effort for the agent. "region": "str", # Optional. Region code. 
"retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * @@ -261648,6 +278316,12 @@ def update_workspace( agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # + Optional. Whether reranking is enabled for retrieval. + "model": "str" # + Optional. Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -261675,8 +278349,27 @@ def update_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. + Benchmark scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. + Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # + Optional. Capabilities supported by this endpoint + (e.g. input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # @@ -261690,10 +278383,25 @@ def update_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. 
"provider": @@ -261701,6 +278409,42 @@ def update_workspace( "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": + "str", # Optional. String default value (for + type="dropdown", e.g. "medium"). + "default_value": 0.0, + # Optional. Numeric default value (for + type="number"). + "max": 0.0, # + Optional. Maximum allowed value (for type="number"). + "min": 0.0, # + Optional. Minimum allowed value (for type="number"). + "name": "str", # + Optional. Setting key name (e.g. "max_tokens", + "temperature", "resolution"). + "options": [ + "str" # + Optional. Allowed values for dropdown selections + (for type="dropdown"). + ], + "step": 0.0, # + Optional. Step increment for numeric settings (for + type="number"). + "type": "str" # + Optional. Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. + Whether this model supports extended thinking (Anthropic + models). + "type": "str", # Optional. Model + type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. @@ -261747,6 +278491,8 @@ def update_workspace( Optional. The agent template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking + token budget for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. Agents. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. @@ -261786,6 +278532,12 @@ def update_workspace( Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value is + "EVALUATION_DATASET_TYPE_UNKNOWN". 
Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The size of @@ -261812,6 +278564,14 @@ def update_workspace( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default value is + "EVALUATION_SCOPE_UNSPECIFIED". Scope that determines whether + a metric belongs to agent evaluation or model evaluation. For + backwards compatibility, UNSPECIFIED defaults to agent + metrics only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -261824,8 +278584,9 @@ def update_workspace( "metric_type": "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: - "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", and - "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", and + "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -262181,6 +278942,12 @@ def update_workspace( Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. + Whether reranking is enabled for retrieval. + "model": "str" # Optional. + Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -262208,6 +278975,24 @@ def update_workspace( the log stream. }, "max_tokens": 0, # Optional. Agents. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional + list of allowed tool names to expose from this server. + ], + "authorization": "str", # Optional. 
+ Optional authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. + Optional additional headers to send to the MCP server. + }, + "server_label": "str", # Optional. A + label identifying this MCP server. + "server_url": "str" # Optional. The + URL of the MCP server. + } + ], "model": { "agreement": { "description": "str", # Optional. @@ -262219,8 +279004,27 @@ def update_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -262234,16 +279038,64 @@ def update_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". 
Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -262284,8 +279136,31 @@ def update_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. 
"inference_version": "str", @@ -262300,10 +279175,25 @@ def update_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -262311,6 +279201,44 @@ def update_workspace( is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. 
Last modified. "upload_complete": bool, # @@ -262341,6 +279269,48 @@ def update_workspace( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. + Router-level fallback models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # Optional. Short task + description. + "name": "str" + # Optional. Task name. + }, + "models": [ + "str" # + Optional. Models assigned to the task. + ], + "selection_policy": { + "prefer": + "str" # Optional. One of: none, cheapest, + fastest. + }, + "task_slug": "str" # + Optional. Task slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # + Optional. Creation date / time. + "description": "str", # Optional. + Description. + "name": "str", # Optional. Name of the model + router. + "regions": [ + "str" # Optional. Target regions for + the router. + ], + "updated_at": "2020-02-20 00:00:00", # + Optional. Last modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # @@ -262361,8 +279331,31 @@ def update_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -262377,10 +279370,25 @@ def update_workspace( "kb_min_chunk_size": 0, # Optional. 
Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -262388,6 +279396,44 @@ def update_workspace( is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -262421,6 +279467,8 @@ def update_workspace( "project_id": "str", # Optional. 
Agents. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning + effort for the agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * @@ -262579,6 +279627,12 @@ def update_workspace( agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # + Optional. Whether reranking is enabled for retrieval. + "model": "str" # + Optional. Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -262606,8 +279660,27 @@ def update_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. + Benchmark scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. + Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # + Optional. Capabilities supported by this endpoint + (e.g. input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # @@ -262621,10 +279694,25 @@ def update_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. 
+ "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -262632,6 +279720,42 @@ def update_workspace( "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": + "str", # Optional. String default value (for + type="dropdown", e.g. "medium"). + "default_value": 0.0, + # Optional. Numeric default value (for + type="number"). + "max": 0.0, # + Optional. Maximum allowed value (for type="number"). + "min": 0.0, # + Optional. Minimum allowed value (for type="number"). + "name": "str", # + Optional. Setting key name (e.g. "max_tokens", + "temperature", "resolution"). + "options": [ + "str" # + Optional. Allowed values for dropdown selections + (for type="dropdown"). + ], + "step": 0.0, # + Optional. Step increment for numeric settings (for + type="number"). + "type": "str" # + Optional. Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. + Whether this model supports extended thinking (Anthropic + models). + "type": "str", # Optional. Model + type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. @@ -262678,6 +279802,8 @@ def update_workspace( Optional. The agent template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking + token budget for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. Agents. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. @@ -262717,6 +279843,12 @@ def update_workspace( Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. 
+ "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value is + "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The size of @@ -262743,6 +279875,14 @@ def update_workspace( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default value is + "EVALUATION_SCOPE_UNSPECIFIED". Scope that determines whether + a metric belongs to agent evaluation or model evaluation. For + backwards compatibility, UNSPECIFIED defaults to agent + metrics only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -262755,8 +279895,9 @@ def update_workspace( "metric_type": "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: - "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", and - "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", and + "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -263114,6 +280255,12 @@ def update_workspace( Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. + Whether reranking is enabled for retrieval. + "model": "str" # Optional. + Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -263141,6 +280288,24 @@ def update_workspace( the log stream. }, "max_tokens": 0, # Optional. Agents. 
+ "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional + list of allowed tool names to expose from this server. + ], + "authorization": "str", # Optional. + Optional authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. + Optional additional headers to send to the MCP server. + }, + "server_label": "str", # Optional. A + label identifying this MCP server. + "server_url": "str" # Optional. The + URL of the MCP server. + } + ], "model": { "agreement": { "description": "str", # Optional. @@ -263152,8 +280317,27 @@ def update_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -263167,16 +280351,64 @@ def update_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. 
Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -263217,8 +280449,31 @@ def update_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). 
+ ], + "endpoint": + "str" # Optional. The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -263233,10 +280488,25 @@ def update_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -263244,6 +280514,44 @@ def update_workspace( is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. 
Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -263274,6 +280582,48 @@ def update_workspace( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. + Router-level fallback models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # Optional. Short task + description. + "name": "str" + # Optional. Task name. + }, + "models": [ + "str" # + Optional. Models assigned to the task. + ], + "selection_policy": { + "prefer": + "str" # Optional. One of: none, cheapest, + fastest. + }, + "task_slug": "str" # + Optional. Task slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # + Optional. Creation date / time. + "description": "str", # Optional. + Description. + "name": "str", # Optional. Name of the model + router. + "regions": [ + "str" # Optional. Target regions for + the router. + ], + "updated_at": "2020-02-20 00:00:00", # + Optional. Last modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # @@ -263294,8 +280644,31 @@ def update_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. The endpoint path (e.g. 
+ /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -263310,10 +280683,25 @@ def update_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -263321,6 +280709,44 @@ def update_workspace( is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. 
+ Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -263354,6 +280780,8 @@ def update_workspace( "project_id": "str", # Optional. Agents. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning + effort for the agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * @@ -263512,6 +280940,12 @@ def update_workspace( agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # + Optional. Whether reranking is enabled for retrieval. + "model": "str" # + Optional. Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -263539,8 +280973,27 @@ def update_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. + Benchmark scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. + Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # + Optional. Capabilities supported by this endpoint + (e.g. input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # @@ -263554,10 +281007,25 @@ def update_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). 
"metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -263565,6 +281033,42 @@ def update_workspace( "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": + "str", # Optional. String default value (for + type="dropdown", e.g. "medium"). + "default_value": 0.0, + # Optional. Numeric default value (for + type="number"). + "max": 0.0, # + Optional. Maximum allowed value (for type="number"). + "min": 0.0, # + Optional. Minimum allowed value (for type="number"). + "name": "str", # + Optional. Setting key name (e.g. "max_tokens", + "temperature", "resolution"). + "options": [ + "str" # + Optional. Allowed values for dropdown selections + (for type="dropdown"). + ], + "step": 0.0, # + Optional. Step increment for numeric settings (for + type="number"). + "type": "str" # + Optional. Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. + Whether this model supports extended thinking (Anthropic + models). + "type": "str", # Optional. Model + type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. @@ -263611,6 +281115,8 @@ def update_workspace( Optional. The agent template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking + token budget for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. Agents. 
"updated_at": "2020-02-20 00:00:00", # Optional. Last modified. @@ -263650,6 +281156,12 @@ def update_workspace( Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value is + "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The size of @@ -263676,6 +281188,14 @@ def update_workspace( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default value is + "EVALUATION_SCOPE_UNSPECIFIED". Scope that determines whether + a metric belongs to agent evaluation or model evaluation. For + backwards compatibility, UNSPECIFIED defaults to agent + metrics only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -263688,8 +281208,9 @@ def update_workspace( "metric_type": "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: - "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", and - "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", and + "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -264236,6 +281757,12 @@ def list_agents_by_workspace( "project_id": "str", # Optional. Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. Whether + reranking is enabled for retrieval. + "model": "str" # Optional. 
Reranker + model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -264263,6 +281790,24 @@ def list_agents_by_workspace( stream. }, "max_tokens": 0, # Optional. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional list of + allowed tool names to expose from this server. + ], + "authorization": "str", # Optional. Optional + authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. Optional + additional headers to send to the MCP server. + }, + "server_label": "str", # Optional. A label + identifying this MCP server. + "server_url": "str" # Optional. The URL of + the MCP server. + } + ], "model": { "agreement": { "description": "str", # Optional. Agreement @@ -264274,8 +281819,27 @@ def list_agents_by_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark scores + for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level capabilities + (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context window + size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. input_text, + output_text, input_image). + ], + "endpoint": "str" # Optional. The + endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. Internally @@ -264288,14 +281852,59 @@ def list_agents_by_workspace( size limit of model. "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. Lifecycle + status of the model (internal, public-preview, active, deprecated, + end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. 
Input/output + modalities. + ], + "output": [ + "str" # Optional. Input/output + modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. Parameter count + in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available reasoning + efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. + String default value (for type="dropdown", e.g. "medium"). + "default_value": 0.0, # Optional. + Numeric default value (for type="number"). + "max": 0.0, # Optional. Maximum + allowed value (for type="number"). + "min": 0.0, # Optional. Minimum + allowed value (for type="number"). + "name": "str", # Optional. Setting + key name (e.g. "max_tokens", "temperature", "resolution"). + "options": [ + "str" # Optional. Allowed + values for dropdown selections (for type="dropdown"). + ], + "step": 0.0, # Optional. Step + increment for numeric settings (for type="number"). + "type": "str" # Optional. Setting + value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this model + supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model has been @@ -264333,8 +281942,27 @@ def list_agents_by_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. + Benchmark scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. 
+ Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # + Optional. Capabilities supported by this endpoint + (e.g. input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # @@ -264348,10 +281976,25 @@ def list_agents_by_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -264359,6 +282002,42 @@ def list_agents_by_workspace( "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": + "str", # Optional. String default value (for + type="dropdown", e.g. "medium"). + "default_value": 0.0, + # Optional. Numeric default value (for + type="number"). + "max": 0.0, # + Optional. Maximum allowed value (for type="number"). + "min": 0.0, # + Optional. Minimum allowed value (for type="number"). + "name": "str", # + Optional. Setting key name (e.g. "max_tokens", + "temperature", "resolution"). + "options": [ + "str" # + Optional. Allowed values for dropdown selections + (for type="dropdown"). 
+ ], + "step": 0.0, # + Optional. Step increment for numeric settings (for + type="number"). + "type": "str" # + Optional. Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. + Whether this model supports extended thinking (Anthropic + models). + "type": "str", # Optional. Model + type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. @@ -264389,6 +282068,45 @@ def list_agents_by_workspace( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. Router-level + fallback models. + ], + "policies": [ + { + "custom_task": { + "description": "str", + # Optional. Short task description. + "name": "str" # + Optional. Task name. + }, + "models": [ + "str" # Optional. + Models assigned to the task. + ], + "selection_policy": { + "prefer": "str" # + Optional. One of: none, cheapest, fastest. + }, + "task_slug": "str" # + Optional. Task slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # Optional. + Creation date / time. + "description": "str", # Optional. Description. + "name": "str", # Optional. Name of the model router. + "regions": [ + "str" # Optional. Target regions for the + router. + ], + "updated_at": "2020-02-20 00:00:00", # Optional. + Last modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # Optional. Key @@ -264409,8 +282127,27 @@ def list_agents_by_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. + Benchmark scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. + Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. 
Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # + Optional. Capabilities supported by this endpoint + (e.g. input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # @@ -264424,10 +282161,25 @@ def list_agents_by_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -264435,6 +282187,42 @@ def list_agents_by_workspace( "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": + "str", # Optional. String default value (for + type="dropdown", e.g. "medium"). + "default_value": 0.0, + # Optional. Numeric default value (for + type="number"). + "max": 0.0, # + Optional. Maximum allowed value (for type="number"). + "min": 0.0, # + Optional. Minimum allowed value (for type="number"). + "name": "str", # + Optional. Setting key name (e.g. "max_tokens", + "temperature", "resolution"). + "options": [ + "str" # + Optional. Allowed values for dropdown selections + (for type="dropdown"). + ], + "step": 0.0, # + Optional. Step increment for numeric settings (for + type="number"). 
+ "type": "str" # + Optional. Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. + Whether this model supports extended thinking (Anthropic + models). + "type": "str", # Optional. Model + type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. @@ -264468,6 +282256,8 @@ def list_agents_by_workspace( "project_id": "str", # Optional. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning effort + for the agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * RETRIEVAL_METHOD_UNKNOWN: @@ -264615,6 +282405,12 @@ def list_agents_by_workspace( List of knowledge bases associated with the agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. + Whether reranking is enabled for retrieval. + "model": "str" # Optional. + Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -264642,8 +282438,27 @@ def list_agents_by_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. 
"inference_version": "str", # Optional. @@ -264657,16 +282472,64 @@ def list_agents_by_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). 
"updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -264710,6 +282573,8 @@ def list_agents_by_workspace( agent template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking token + budget for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. @@ -264752,6 +282617,13 @@ def list_agents_by_workspace( 00:00:00", # Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default + value is "EVALUATION_DATASET_TYPE_UNKNOWN". Known values + are: "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # @@ -264782,6 +282654,16 @@ def list_agents_by_workspace( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default + value is "EVALUATION_SCOPE_UNSPECIFIED". Scope that + determines whether a metric belongs to agent + evaluation or model evaluation. For backwards + compatibility, UNSPECIFIED defaults to agent metrics + only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", + "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -264795,8 +282677,10 @@ def list_agents_by_workspace( "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", - "METRIC_TYPE_GENERAL_QUALITY", and - "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_GENERAL_QUALITY", + "METRIC_TYPE_RAG_AND_TOOL", + "METRIC_TYPE_MODEL_QUALITY", and + "METRIC_TYPE_MODEL_SAFETY". 
"metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -265263,6 +283147,12 @@ def update_agents_workspace( Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. + Whether reranking is enabled for retrieval. + "model": "str" # Optional. + Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -265290,6 +283180,24 @@ def update_agents_workspace( the log stream. }, "max_tokens": 0, # Optional. Agents. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional + list of allowed tool names to expose from this server. + ], + "authorization": "str", # Optional. + Optional authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. + Optional additional headers to send to the MCP server. + }, + "server_label": "str", # Optional. A + label identifying this MCP server. + "server_url": "str" # Optional. The + URL of the MCP server. + } + ], "model": { "agreement": { "description": "str", # Optional. @@ -265301,8 +283209,27 @@ def update_agents_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -265316,16 +283243,64 @@ def update_agents_workspace( "kb_min_chunk_size": 0, # Optional. 
Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. 
Model @@ -265366,8 +283341,31 @@ def update_agents_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -265382,10 +283380,25 @@ def update_agents_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -265393,6 +283406,44 @@ def update_agents_workspace( is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. 
Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -265423,6 +283474,48 @@ def update_agents_workspace( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. + Router-level fallback models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # Optional. Short task + description. + "name": "str" + # Optional. Task name. + }, + "models": [ + "str" # + Optional. Models assigned to the task. + ], + "selection_policy": { + "prefer": + "str" # Optional. One of: none, cheapest, + fastest. + }, + "task_slug": "str" # + Optional. Task slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # + Optional. Creation date / time. + "description": "str", # Optional. + Description. + "name": "str", # Optional. Name of the model + router. + "regions": [ + "str" # Optional. Target regions for + the router. + ], + "updated_at": "2020-02-20 00:00:00", # + Optional. Last modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. 
"openai_api_key": { "created_at": "2020-02-20 00:00:00", # @@ -265443,8 +283536,31 @@ def update_agents_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -265459,10 +283575,25 @@ def update_agents_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -265470,6 +283601,44 @@ def update_agents_workspace( is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). 
+ "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -265503,6 +283672,8 @@ def update_agents_workspace( "project_id": "str", # Optional. Agents. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning + effort for the agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * @@ -265661,6 +283832,12 @@ def update_agents_workspace( agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # + Optional. Whether reranking is enabled for retrieval. + "model": "str" # + Optional. Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -265688,8 +283865,27 @@ def update_agents_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. + Benchmark scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). 
+ ], + "context_window": "str", # Optional. + Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # + Optional. Capabilities supported by this endpoint + (e.g. input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # @@ -265703,10 +283899,25 @@ def update_agents_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -265714,6 +283925,42 @@ def update_agents_workspace( "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": + "str", # Optional. String default value (for + type="dropdown", e.g. "medium"). + "default_value": 0.0, + # Optional. Numeric default value (for + type="number"). + "max": 0.0, # + Optional. Maximum allowed value (for type="number"). + "min": 0.0, # + Optional. Minimum allowed value (for type="number"). + "name": "str", # + Optional. Setting key name (e.g. "max_tokens", + "temperature", "resolution"). + "options": [ + "str" # + Optional. 
Allowed values for dropdown selections + (for type="dropdown"). + ], + "step": 0.0, # + Optional. Step increment for numeric settings (for + type="number"). + "type": "str" # + Optional. Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. + Whether this model supports extended thinking (Anthropic + models). + "type": "str", # Optional. Model + type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. @@ -265760,6 +284007,8 @@ def update_agents_workspace( Optional. The agent template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking + token budget for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. Agents. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. @@ -265799,6 +284048,12 @@ def update_agents_workspace( Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value is + "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The size of @@ -265825,6 +284080,14 @@ def update_agents_workspace( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default value is + "EVALUATION_SCOPE_UNSPECIFIED". Scope that determines whether + a metric belongs to agent evaluation or model evaluation. For + backwards compatibility, UNSPECIFIED defaults to agent + metrics only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. 
If true, the metric is inverted, meaning that a lower value is better. @@ -265837,8 +284100,9 @@ def update_agents_workspace( "metric_type": "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: - "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", and - "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", and + "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -266194,6 +284458,12 @@ def update_agents_workspace( Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. + Whether reranking is enabled for retrieval. + "model": "str" # Optional. + Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -266221,6 +284491,24 @@ def update_agents_workspace( the log stream. }, "max_tokens": 0, # Optional. Agents. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional + list of allowed tool names to expose from this server. + ], + "authorization": "str", # Optional. + Optional authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. + Optional additional headers to send to the MCP server. + }, + "server_label": "str", # Optional. A + label identifying this MCP server. + "server_url": "str" # Optional. The + URL of the MCP server. + } + ], "model": { "agreement": { "description": "str", # Optional. @@ -266232,8 +284520,27 @@ def update_agents_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. 
Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -266247,16 +284554,64 @@ def update_agents_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. 
+ Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -266297,8 +284652,31 @@ def update_agents_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -266313,10 +284691,25 @@ def update_agents_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. 
"provider": @@ -266324,6 +284717,44 @@ def update_agents_workspace( is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -266354,6 +284785,48 @@ def update_agents_workspace( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. + Router-level fallback models. + ], + "policies": [ + { + "custom_task": { + "description": "str", # Optional. Short task + description. + "name": "str" + # Optional. Task name. + }, + "models": [ + "str" # + Optional. Models assigned to the task. + ], + "selection_policy": { + "prefer": + "str" # Optional. One of: none, cheapest, + fastest. + }, + "task_slug": "str" # + Optional. Task slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # + Optional. 
Creation date / time. + "description": "str", # Optional. + Description. + "name": "str", # Optional. Name of the model + router. + "regions": [ + "str" # Optional. Target regions for + the router. + ], + "updated_at": "2020-02-20 00:00:00", # + Optional. Last modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # @@ -266374,8 +284847,31 @@ def update_agents_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -266390,10 +284886,25 @@ def update_agents_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. 
"provider": @@ -266401,6 +284912,44 @@ def update_agents_workspace( is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -266434,6 +284983,8 @@ def update_agents_workspace( "project_id": "str", # Optional. Agents. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning + effort for the agent. "region": "str", # Optional. Region code. "retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * @@ -266592,6 +285143,12 @@ def update_agents_workspace( agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # + Optional. Whether reranking is enabled for retrieval. + "model": "str" # + Optional. 
Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -266619,8 +285176,27 @@ def update_agents_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. + Benchmark scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. + Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # + Optional. Capabilities supported by this endpoint + (e.g. input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # @@ -266634,10 +285210,25 @@ def update_agents_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -266645,6 +285236,42 @@ def update_agents_workspace( "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": + "str", # Optional. 
String default value (for + type="dropdown", e.g. "medium"). + "default_value": 0.0, + # Optional. Numeric default value (for + type="number"). + "max": 0.0, # + Optional. Maximum allowed value (for type="number"). + "min": 0.0, # + Optional. Minimum allowed value (for type="number"). + "name": "str", # + Optional. Setting key name (e.g. "max_tokens", + "temperature", "resolution"). + "options": [ + "str" # + Optional. Allowed values for dropdown selections + (for type="dropdown"). + ], + "step": 0.0, # + Optional. Step increment for numeric settings (for + type="number"). + "type": "str" # + Optional. Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. + Whether this model supports extended thinking (Anthropic + models). + "type": "str", # Optional. Model + type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. @@ -266691,6 +285318,8 @@ def update_agents_workspace( Optional. The agent template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking + token budget for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. Agents. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. @@ -266730,6 +285359,12 @@ def update_agents_workspace( Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value is + "EVALUATION_DATASET_TYPE_UNKNOWN". Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The size of @@ -266756,6 +285391,14 @@ def update_agents_workspace( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. 
+ "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default value is + "EVALUATION_SCOPE_UNSPECIFIED". Scope that determines whether + a metric belongs to agent evaluation or model evaluation. For + backwards compatibility, UNSPECIFIED defaults to agent + metrics only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -266768,8 +285411,9 @@ def update_agents_workspace( "metric_type": "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: - "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", and - "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", and + "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -267128,6 +285772,12 @@ def update_agents_workspace( Knowledge bases. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # Optional. + Whether reranking is enabled for retrieval. + "model": "str" # Optional. + Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -267155,6 +285805,24 @@ def update_agents_workspace( the log stream. }, "max_tokens": 0, # Optional. Agents. + "mcp_servers": [ + { + "allowed_tools": [ + "str" # Optional. Optional + list of allowed tool names to expose from this server. + ], + "authorization": "str", # Optional. + Optional authorization header value for the MCP server. + "headers": { + "str": "str" # Optional. + Optional additional headers to send to the MCP server. + }, + "server_label": "str", # Optional. A + label identifying this MCP server. + "server_url": "str" # Optional. The + URL of the MCP server. 
+ } + ], "model": { "agreement": { "description": "str", # Optional. @@ -267166,8 +285834,27 @@ def update_agents_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. Benchmark + scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. Context + window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # Optional. + Capabilities supported by this endpoint (e.g. + input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # Optional. @@ -267181,16 +285868,64 @@ def update_agents_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # Optional. + Lifecycle status of the model (internal, public-preview, active, + deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": "MODEL_PROVIDER_DIGITALOCEAN", # Optional. Default value is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # + Optional. String default value (for type="dropdown", e.g. + "medium"). 
+ "default_value": 0.0, # + Optional. Numeric default value (for type="number"). + "max": 0.0, # Optional. + Maximum allowed value (for type="number"). + "min": 0.0, # Optional. + Minimum allowed value (for type="number"). + "name": "str", # Optional. + Setting key name (e.g. "max_tokens", "temperature", + "resolution"). + "options": [ + "str" # Optional. + Allowed values for dropdown selections (for + type="dropdown"). + ], + "step": 0.0, # Optional. + Step increment for numeric settings (for type="number"). + "type": "str" # Optional. + Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. Whether this + model supports extended thinking (Anthropic models). + "type": "str", # Optional. Model type (chat, + embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. Model @@ -267231,8 +285966,31 @@ def update_agents_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -267247,10 +286005,25 @@ def update_agents_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). 
"metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -267258,6 +286031,44 @@ def update_agents_workspace( is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -267288,6 +286099,48 @@ def update_agents_workspace( "updated_at": "2020-02-20 00:00:00" # Optional. Key last updated date. }, + "model_router": { + "config": { + "fallback_models": [ + "str" # Optional. + Router-level fallback models. 
+ ], + "policies": [ + { + "custom_task": { + "description": "str", # Optional. Short task + description. + "name": "str" + # Optional. Task name. + }, + "models": [ + "str" # + Optional. Models assigned to the task. + ], + "selection_policy": { + "prefer": + "str" # Optional. One of: none, cheapest, + fastest. + }, + "task_slug": "str" # + Optional. Task slug. + } + ] + }, + "created_at": "2020-02-20 00:00:00", # + Optional. Creation date / time. + "description": "str", # Optional. + Description. + "name": "str", # Optional. Name of the model + router. + "regions": [ + "str" # Optional. Target regions for + the router. + ], + "updated_at": "2020-02-20 00:00:00", # + Optional. Last modified. + "uuid": "str" # Optional. Unique id. + }, "name": "str", # Optional. Agent name. "openai_api_key": { "created_at": "2020-02-20 00:00:00", # @@ -267308,8 +286161,31 @@ def update_agents_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # + Optional. Benchmark scores for this model, stored as + arbitrary JSON. + "capabilities": [ + "str" # Optional. + High-level capabilities (e.g. tool_calling, vision, + streaming). + ], + "context_window": "str", # + Optional. Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" + # Optional. Capabilities supported by this + endpoint (e.g. input_text, output_text, + input_image). + ], + "endpoint": + "str" # Optional. The endpoint path (e.g. + /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", @@ -267324,10 +286200,25 @@ def update_agents_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. 
+ "modalities": { + "input": [ + "str" # + Optional. Input/output modalities. + ], + "output": [ + "str" # + Optional. Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # + Optional. Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. "provider": @@ -267335,6 +286226,44 @@ def update_agents_workspace( is "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. + Available reasoning efforts for this model. + ], + "settings": [ + { + "default_string": "str", # Optional. String + default value (for type="dropdown", e.g. + "medium"). + "default_value": 0.0, # Optional. Numeric + default value (for type="number"). + "max": 0.0, + # Optional. Maximum allowed value (for + type="number"). + "min": 0.0, + # Optional. Minimum allowed value (for + type="number"). + "name": + "str", # Optional. Setting key name (e.g. + "max_tokens", "temperature", "resolution"). + "options": [ + "str" + # Optional. Allowed values for dropdown + selections (for type="dropdown"). + ], + "step": 0.0, + # Optional. Step increment for numeric settings + (for type="number"). + "type": "str" + # Optional. Setting value type: "number" or + "dropdown". + } + ], + "thinking": bool, # + Optional. Whether this model supports extended thinking + (Anthropic models). + "type": "str", # Optional. + Model type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # @@ -267368,6 +286297,8 @@ def update_agents_workspace( "project_id": "str", # Optional. Agents. "provide_citations": bool, # Optional. Whether the agent should provide in-response citations. + "reasoning_effort": "str", # Optional. The reasoning + effort for the agent. "region": "str", # Optional. Region code. 
"retrieval_method": "RETRIEVAL_METHOD_UNKNOWN", # Optional. Default value is "RETRIEVAL_METHOD_UNKNOWN". * @@ -267526,6 +286457,12 @@ def update_agents_workspace( agent template. "region": "str", # Optional. Region code. + "reranking_config": { + "enabled": bool, # + Optional. Whether reranking is enabled for retrieval. + "model": "str" # + Optional. Reranker model internal name. + }, "tags": [ "str" # Optional. Tags to organize related resources. @@ -267553,8 +286490,27 @@ def update_agents_workspace( "uuid": "str" # Optional. Agreement Description. }, + "benchmark_score": {}, # Optional. + Benchmark scores for this model, stored as arbitrary JSON. + "capabilities": [ + "str" # Optional. High-level + capabilities (e.g. tool_calling, vision, streaming). + ], + "context_window": "str", # Optional. + Context window size in tokens. "created_at": "2020-02-20 00:00:00", # Optional. Creation date / time. + "endpoints": [ + { + "capabilities": [ + "str" # + Optional. Capabilities supported by this endpoint + (e.g. input_text, output_text, input_image). + ], + "endpoint": "str" # + Optional. The endpoint path (e.g. /chat/responses). + } + ], "inference_name": "str", # Optional. Internally used name. "inference_version": "str", # @@ -267568,10 +286524,25 @@ def update_agents_workspace( "kb_min_chunk_size": 0, # Optional. Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase. + "lifecycle_status": "str", # + Optional. Lifecycle status of the model (internal, + public-preview, active, deprecated, end_of_life). "metadata": {}, # Optional. Additional meta data. + "modalities": { + "input": [ + "str" # Optional. + Input/output modalities. + ], + "output": [ + "str" # Optional. + Input/output modalities. + ] + }, "name": "str", # Optional. Name of the model. + "parameter_count": 0.0, # Optional. + Parameter count in billions. "parent_uuid": "str", # Optional. Unique id of the model, this model is based on. 
"provider": @@ -267579,6 +286550,42 @@ def update_agents_workspace( "MODEL_PROVIDER_DIGITALOCEAN". Known values are: "MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", and "MODEL_PROVIDER_OPENAI". + "reasoning_efforts": [ + "str" # Optional. Available + reasoning efforts for this model. + ], + "settings": [ + { + "default_string": + "str", # Optional. String default value (for + type="dropdown", e.g. "medium"). + "default_value": 0.0, + # Optional. Numeric default value (for + type="number"). + "max": 0.0, # + Optional. Maximum allowed value (for type="number"). + "min": 0.0, # + Optional. Minimum allowed value (for type="number"). + "name": "str", # + Optional. Setting key name (e.g. "max_tokens", + "temperature", "resolution"). + "options": [ + "str" # + Optional. Allowed values for dropdown selections + (for type="dropdown"). + ], + "step": 0.0, # + Optional. Step increment for numeric settings (for + type="number"). + "type": "str" # + Optional. Setting value type: "number" or "dropdown". + } + ], + "thinking": bool, # Optional. + Whether this model supports extended thinking (Anthropic + models). + "type": "str", # Optional. Model + type (chat, embedding, image, reasoning, coding). "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. "upload_complete": bool, # Optional. @@ -267625,6 +286632,8 @@ def update_agents_workspace( Optional. The agent template's last updated date. "uuid": "str" # Optional. Unique id. }, + "thinking_token_budget": 0, # Optional. The thinking + token budget for Anthropic extended thinking (0 = disabled). "top_p": 0.0, # Optional. Agents. "updated_at": "2020-02-20 00:00:00", # Optional. Last modified. @@ -267664,6 +286673,12 @@ def update_agents_workspace( Optional. Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": + "EVALUATION_DATASET_TYPE_UNKNOWN", # Optional. Default value is + "EVALUATION_DATASET_TYPE_UNKNOWN". 
Known values are: + "EVALUATION_DATASET_TYPE_UNKNOWN", "EVALUATION_DATASET_TYPE_ADK", + "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. The size of @@ -267690,6 +286705,14 @@ def update_agents_workspace( "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. Evaluations. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default value is + "EVALUATION_SCOPE_UNSPECIFIED". Scope that determines whether + a metric belongs to agent evaluation or model evaluation. For + backwards compatibility, UNSPECIFIED defaults to agent + metrics only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. @@ -267702,8 +286725,9 @@ def update_agents_workspace( "metric_type": "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: - "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", and - "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", + "METRIC_TYPE_RAG_AND_TOOL", "METRIC_TYPE_MODEL_QUALITY", and + "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. Evaluations. "metric_value_type": @@ -267885,6 +286909,11 @@ def list_evaluation_test_cases_by_workspace( Time created at. "dataset_name": "str", # Optional. Name of the dataset. + "dataset_type": "EVALUATION_DATASET_TYPE_UNKNOWN", # + Optional. Default value is "EVALUATION_DATASET_TYPE_UNKNOWN". Known + values are: "EVALUATION_DATASET_TYPE_UNKNOWN", + "EVALUATION_DATASET_TYPE_ADK", "EVALUATION_DATASET_TYPE_NON_ADK", and + "EVALUATION_DATASET_TYPE_MODEL". "dataset_uuid": "str", # Optional. UUID of the dataset. "file_size": "str", # Optional. 
The size of the @@ -267908,6 +286937,14 @@ def list_evaluation_test_cases_by_workspace( "METRIC_CATEGORY_CONTEXT_QUALITY", and "METRIC_CATEGORY_MODEL_FIT". "description": "str", # Optional. + "evaluation_scope": + "EVALUATION_SCOPE_UNSPECIFIED", # Optional. Default value is + "EVALUATION_SCOPE_UNSPECIFIED". Scope that determines whether a + metric belongs to agent evaluation or model evaluation. For + backwards compatibility, UNSPECIFIED defaults to agent metrics + only in list operations. Known values are: + "EVALUATION_SCOPE_UNSPECIFIED", "EVALUATION_SCOPE_AGENT", and + "EVALUATION_SCOPE_MODEL". "inverted": bool, # Optional. If true, the metric is inverted, meaning that a lower value is better. "is_metric_goal": bool, # Optional. @@ -267916,7 +286953,8 @@ def list_evaluation_test_cases_by_workspace( "metric_type": "METRIC_TYPE_UNSPECIFIED", # Optional. Default value is "METRIC_TYPE_UNSPECIFIED". Known values are: "METRIC_TYPE_UNSPECIFIED", - "METRIC_TYPE_GENERAL_QUALITY", and "METRIC_TYPE_RAG_AND_TOOL". + "METRIC_TYPE_GENERAL_QUALITY", "METRIC_TYPE_RAG_AND_TOOL", + "METRIC_TYPE_MODEL_QUALITY", and "METRIC_TYPE_MODEL_SAFETY". "metric_uuid": "str", # Optional. "metric_value_type": "METRIC_VALUE_TYPE_UNSPECIFIED", # Optional. Default value is