
Commit 12f96c8

Fix code formatting for line length compliance
1 parent 3783675 commit 12f96c8

File tree

1 file changed: +38 -25 lines

libs/oci/langchain_oci/chat_models/oci_generative_ai.py

Lines changed: 38 additions & 25 deletions
@@ -247,8 +247,13 @@ def chat_generation_info(self, response: Any) -> Dict[str, Any]:
         }

         # Include token usage if available
-        if hasattr(response.data.chat_response, "usage") and response.data.chat_response.usage:
-            generation_info["total_tokens"] = response.data.chat_response.usage.total_tokens
+        if (
+            hasattr(response.data.chat_response, "usage")
+            and response.data.chat_response.usage
+        ):
+            generation_info["total_tokens"] = (
+                response.data.chat_response.usage.total_tokens
+            )

         # Include tool calls if available
         if self.chat_tool_calls(response):
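The guarded attribute access reformatted above is easy to exercise in isolation. A minimal sketch, assuming SimpleNamespace stand-ins for the response shape (the real objects come from the OCI SDK and are not constructed this way):

from types import SimpleNamespace

# Hypothetical stand-ins mirroring response.data.chat_response.usage
usage = SimpleNamespace(total_tokens=42)
chat_response = SimpleNamespace(usage=usage)
response = SimpleNamespace(data=SimpleNamespace(chat_response=chat_response))

generation_info = {}
if (
    hasattr(response.data.chat_response, "usage")
    and response.data.chat_response.usage
):
    generation_info["total_tokens"] = (
        response.data.chat_response.usage.total_tokens
    )
print(generation_info)  # {'total_tokens': 42}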
@@ -629,9 +634,14 @@ def chat_generation_info(self, response: Any) -> Dict[str, Any]:
         }

         # Include token usage if available
-        if hasattr(response.data.chat_response, "usage") and response.data.chat_response.usage:
-            generation_info["total_tokens"] = response.data.chat_response.usage.total_tokens
-
+        if (
+            hasattr(response.data.chat_response, "usage")
+            and response.data.chat_response.usage
+        ):
+            generation_info["total_tokens"] = (
+                response.data.chat_response.usage.total_tokens
+            )
+
         if self.chat_tool_calls(response):
             generation_info["tool_calls"] = self.format_response_tool_calls(
                 self.chat_tool_calls(response)
@@ -777,8 +787,7 @@ def messages_to_oci_params(
         # continue calling tools even after receiving results.

         def _should_allow_more_tool_calls(
-            messages: List[BaseMessage],
-            max_tool_calls: int
+            messages: List[BaseMessage], max_tool_calls: int
         ) -> bool:
             """
             Determine if the model should be allowed to call more tools.
@@ -794,10 +803,7 @@ def _should_allow_more_tool_calls(
                 max_tool_calls: Maximum number of tool calls before forcing stop
             """
             # Count total tool calls made so far
-            tool_call_count = sum(
-                1 for msg in messages
-                if isinstance(msg, ToolMessage)
-            )
+            tool_call_count = sum(1 for msg in messages if isinstance(msg, ToolMessage))

             # Safety limit: prevent runaway tool calling
             if tool_call_count >= max_tool_calls:
@@ -806,12 +812,12 @@ def _should_allow_more_tool_calls(
             # Detect infinite loop: same tool called with same arguments in succession
             recent_calls = []
             for msg in reversed(messages):
-                if hasattr(msg, 'tool_calls') and msg.tool_calls:
+                if hasattr(msg, "tool_calls") and msg.tool_calls:
                     for tc in msg.tool_calls:
                         # Create signature: (tool_name, sorted_args)
                         try:
-                            args_str = json.dumps(tc.get('args', {}), sort_keys=True)
-                            signature = (tc.get('name', ''), args_str)
+                            args_str = json.dumps(tc.get("args", {}), sort_keys=True)
+                            signature = (tc.get("name", ""), args_str)

                             # Check if this exact call was made in last 2 calls
                             if signature in recent_calls[-2:]:
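The loop-detection idea in this hunk, a signature of (tool name, args serialized with sorted keys) compared against the last two calls, can be sketched standalone. The helper below is illustrative, not part of langchain_oci:

import json

def is_repeated_call(history, new_call, window=2):
    """True if new_call repeats a signature seen in the last `window` calls.

    Serializing args with sort_keys=True means the same tool invoked with the
    same arguments in a different key order still counts as a repeat.
    """
    def signature(tc):
        return (tc.get("name", ""), json.dumps(tc.get("args", {}), sort_keys=True))

    recent = [signature(tc) for tc in history]
    return signature(new_call) in recent[-window:]

history = [{"name": "search", "args": {"q": "oci", "limit": 5}}]
print(is_repeated_call(history, {"name": "search", "args": {"limit": 5, "q": "oci"}}))  # True
print(is_repeated_call(history, {"name": "search", "args": {"q": "langchain"}}))  # False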
@@ -1153,9 +1159,7 @@ def _prepare_request(
             ) from ex

         oci_params = self._provider.messages_to_oci_params(
-            messages,
-            max_sequential_tool_calls=self.max_sequential_tool_calls,
-            **kwargs
+            messages, max_sequential_tool_calls=self.max_sequential_tool_calls, **kwargs
         )

         oci_params["is_stream"] = stream
@@ -1165,12 +1169,17 @@ def _prepare_request(
             _model_kwargs[self._provider.stop_sequence_key] = stop

         # Warn if using max_tokens with OpenAI models
-        if self.model_id and self.model_id.startswith("openai.") and "max_tokens" in _model_kwargs:
+        if (
+            self.model_id
+            and self.model_id.startswith("openai.")
+            and "max_tokens" in _model_kwargs
+        ):
             import warnings
+
             warnings.warn(
                 f"OpenAI models require 'max_completion_tokens' instead of 'max_tokens'.",
                 UserWarning,
-                stacklevel=2
+                stacklevel=2,
             )

         chat_params = {**_model_kwargs, **kwargs, **oci_params}
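The multi-line guard keeps behavior identical; note stacklevel=2, which attributes the warning to the caller rather than to _prepare_request. A minimal sketch of the same pattern (the helper name and model id here are hypothetical):

import warnings

def prepare_request(model_id, **model_kwargs):
    # Same guard as in the diff: OpenAI-compatible model ids expect
    # 'max_completion_tokens' rather than 'max_tokens'.
    if model_id.startswith("openai.") and "max_tokens" in model_kwargs:
        warnings.warn(
            "OpenAI models require 'max_completion_tokens' instead of 'max_tokens'.",
            UserWarning,
            stacklevel=2,  # report the caller's line, not this helper's
        )

prepare_request("openai.gpt-4o", max_tokens=256)  # UserWarning points at this line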
@@ -1234,7 +1243,11 @@ def bind_tools(

         # Add parallel tool calls support
         # Use bind-time parameter if provided, else fall back to class default
-        use_parallel = parallel_tool_calls if parallel_tool_calls is not None else self.parallel_tool_calls
+        use_parallel = (
+            parallel_tool_calls
+            if parallel_tool_calls is not None
+            else self.parallel_tool_calls
+        )
         if use_parallel:
             kwargs["is_parallel_tool_calls"] = True

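From the caller's side, the bind-time override reformatted above would be used roughly as follows; a sketch assuming ChatOCIGenAI is importable from langchain_oci and that a tool-capable model is configured (the ids are placeholders, not real OCIDs):

from langchain_core.tools import tool
from langchain_oci import ChatOCIGenAI

@tool
def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b

llm = ChatOCIGenAI(
    model_id="...",        # a tool-capable model id
    compartment_id="...",  # your OCI compartment OCID
)
# The bind-time value wins; omit the argument to fall back to the
# instance-level parallel_tool_calls default.
llm_with_tools = llm.bind_tools([add], parallel_tool_calls=True)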
@@ -1267,7 +1280,7 @@ def with_structured_output(
                 used. Note that if using "json_mode" then you must include instructions
                 for formatting the output into the desired schema into the model call.
                 If "json_schema" then it allows the user to pass a json schema (or pydantic)
-                to the model for structured output.
+                to the model for structured output.
             include_raw:
                 If False then only the parsed structured output is returned. If
                 an error occurs during model output parsing it will be raised. If True
@@ -1323,18 +1336,18 @@ def with_structured_output(
                 if is_pydantic_schema
                 else schema
             )
-
+
             response_json_schema = self._provider.oci_response_json_schema(
                 name=json_schema_dict.get("title", "response"),
                 description=json_schema_dict.get("description", ""),
                 schema=json_schema_dict,
-                is_strict=True
+                is_strict=True,
             )
-
+
             response_format_obj = self._provider.oci_json_schema_response_format(
                 json_schema=response_json_schema
             )
-
+
             llm = self.bind(response_format=response_format_obj)
             if is_pydantic_schema:
                 output_parser = PydanticOutputParser(pydantic_object=schema)
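For reference, the json_schema branch being tidied here is reached via with_structured_output; a usage sketch under the same assumptions as above (the Pydantic schema is invented, the method name comes from the docstring in this diff):

from pydantic import BaseModel
from langchain_oci import ChatOCIGenAI

class Answer(BaseModel):
    city: str
    population: int

llm = ChatOCIGenAI(model_id="...", compartment_id="...")
structured_llm = llm.with_structured_output(Answer, method="json_schema")
# structured_llm.invoke("...") would return an Answer instance, parsed by
# the PydanticOutputParser wired up in the branch above.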
