
Commit 3596291

Use default 88 line length in ruff formatting
1 parent 6dc6c0c commit 3596291
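Every hunk in this commit is a mechanical re-wrap from running `ruff format` without a line-length override, so the formatter falls back to its default 88 columns. A before/after sketch of the transformation, using a stand-in for one of the calls re-wrapped below:

```python
def completion_with_retry(**kwargs):
    """Stand-in for the method re-wrapped in this commit; real logic lives in the class."""
    return kwargs


body, run_manager, requests_kwargs = {}, None, {}

# Before: a single 100+ character call, past ruff's default 88-column limit:
# res = completion_with_retry(data=body, run_manager=run_manager, stream=True, **requests_kwargs)

# After `ruff format` at the default width, the argument list wraps:
res = completion_with_retry(
    data=body, run_manager=run_manager, stream=True, **requests_kwargs
)
```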

20 files changed: +1091 −361 lines

libs/oci/langchain_oci/chat_models/oci_data_science.py

Lines changed: 50 additions & 15 deletions
@@ -138,7 +138,11 @@ class ChatOCIModelDeployment(BaseChatModel, BaseOCIModelDeployment):
             AIMessage(
                 content="Bonjour le monde!",
                 response_metadata={
-                    "token_usage": {"prompt_tokens": 40, "total_tokens": 50, "completion_tokens": 10},
+                    "token_usage": {
+                        "prompt_tokens": 40,
+                        "total_tokens": 50,
+                        "completion_tokens": 10,
+                    },
                     "model_name": "odsc-llm",
                     "system_fingerprint": "",
                     "finish_reason": "stop",
@@ -194,11 +198,16 @@ class Joke(BaseModel):
 
 
         structured_llm = chat.with_structured_output(Joke, method="json_mode")
-        structured_llm.invoke("Tell me a joke about cats, respond in JSON with `setup` and `punchline` keys")
+        structured_llm.invoke(
+            "Tell me a joke about cats, respond in JSON with `setup` and `punchline` keys"
+        )
 
     .. code-block:: python
 
-        Joke(setup="Why did the cat get stuck in the tree?", punchline="Because it was chasing its tail!")
+        Joke(
+            setup="Why did the cat get stuck in the tree?",
+            punchline="Because it was chasing its tail!",
+        )
 
     See ``ChatOCIModelDeployment.with_structured_output()`` for more.
 
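The docstring above assumes a `Joke` model is already defined; a self-contained sketch of the same flow (endpoint placeholder aside):

```python
from pydantic import BaseModel

from langchain_oci.chat_models.oci_data_science import ChatOCIModelDeployment


class Joke(BaseModel):
    setup: str
    punchline: str


chat = ChatOCIModelDeployment(endpoint="https://<MD_OCID>/predict")  # hypothetical
structured_llm = chat.with_structured_output(Joke, method="json_mode")
joke = structured_llm.invoke(
    "Tell me a joke about cats, respond in JSON with `setup` and `punchline` keys"
)
print(joke.setup, "/", joke.punchline)
```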
@@ -240,7 +249,11 @@ def _construct_json_body(self, messages: list, params: dict) -> dict:
         .. code-block:: python
 
             {
-                "token_usage": {"prompt_tokens": 40, "total_tokens": 50, "completion_tokens": 10},
+                "token_usage": {
+                    "prompt_tokens": 40,
+                    "total_tokens": 50,
+                    "completion_tokens": 10,
+                },
                 "model_name": "odsc-llm",
                 "system_fingerprint": "",
                 "finish_reason": "stop",
@@ -264,7 +277,8 @@ def validate_openai(cls, values: Any) -> Any:
         """Checks if langchain_openai is installed."""
         if not importlib.util.find_spec("langchain_openai"):
             raise ImportError(
-                "Could not import langchain_openai package. Please install it with `pip install langchain_openai`."
+                "Could not import langchain_openai package. "
+                "Please install it with `pip install langchain_openai`."
             )
         return values
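The guard above is the usual optional-dependency check; the same pattern in a generic, runnable form:

```python
import importlib.util


def require(package: str) -> None:
    """Raise a helpful ImportError when an optional dependency is missing."""
    if not importlib.util.find_spec(package):
        raise ImportError(
            f"Could not import {package} package. "
            f"Please install it with `pip install {package}`."
        )


require("json")  # stdlib, always importable, so this passes silently
```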

@@ -291,7 +305,9 @@ def _default_params(self) -> Dict[str, Any]:
             "stream": self.streaming,
         }
 
-    def _headers(self, is_async: Optional[bool] = False, body: Optional[dict] = None) -> Dict:
+    def _headers(
+        self, is_async: Optional[bool] = False, body: Optional[dict] = None
+    ) -> Dict:
         """Construct and return the headers for a request.
 
         Args:
@@ -343,13 +359,17 @@ def _generate(
             response = chat.invoke(messages)
         """ # noqa: E501
         if self.streaming:
-            stream_iter = self._stream(messages, stop=stop, run_manager=run_manager, **kwargs)
+            stream_iter = self._stream(
+                messages, stop=stop, run_manager=run_manager, **kwargs
+            )
             return generate_from_stream(stream_iter)
 
         requests_kwargs = kwargs.pop("requests_kwargs", {})
         params = self._invocation_params(stop, **kwargs)
         body = self._construct_json_body(messages, params)
-        res = self.completion_with_retry(data=body, run_manager=run_manager, **requests_kwargs)
+        res = self.completion_with_retry(
+            data=body, run_manager=run_manager, **requests_kwargs
+        )
         return self._process_response(res.json())
 
     def _stream(
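The re-wrap leaves the dispatch untouched: with `streaming=True`, `_generate` defers to `_stream` and merges chunks via `generate_from_stream`. A hedged usage sketch (endpoint is a placeholder):

```python
from langchain_oci.chat_models.oci_data_science import ChatOCIModelDeployment

chat = ChatOCIModelDeployment(
    endpoint="https://<MD_OCID>/predict",  # hypothetical endpoint
    streaming=True,  # invoke() now goes through _stream + generate_from_stream
)
# Still returns one AIMessage; the streamed chunks are aggregated internally.
response = chat.invoke("Tell me a joke.")
print(response.content)
```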
@@ -397,7 +417,9 @@ def _stream(
         params = self._invocation_params(stop, **kwargs)
         body = self._construct_json_body(messages, params) # request json body
 
-        response = self.completion_with_retry(data=body, run_manager=run_manager, stream=True, **requests_kwargs)
+        response = self.completion_with_retry(
+            data=body, run_manager=run_manager, stream=True, **requests_kwargs
+        )
         default_chunk_class = AIMessageChunk
         for line in self._parse_stream(response.iter_lines()):
             chunk = self._handle_sse_line(line, default_chunk_class)
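For context, `_parse_stream` and `_handle_sse_line` walk a server-sent-events body; a generic sketch of that framing (the helper name here is illustrative, not this module's):

```python
import json


def iter_sse_data(lines):
    """Yield the JSON payload of each `data:` line, stopping at [DONE]."""
    for raw in lines:
        line = raw.decode("utf-8") if isinstance(raw, bytes) else raw
        if not line.startswith("data:"):
            continue  # skip comments, event names, and keep-alive blanks
        payload = line[len("data:"):].strip()
        if payload == "[DONE]":
            break
        yield json.loads(payload)


sample = [b'data: {"choices": [{"delta": {"content": "Bonjour"}}]}', b"data: [DONE]"]
for event in iter_sse_data(sample):
    print(event["choices"][0]["delta"]["content"])
```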
@@ -447,7 +469,9 @@ async def _agenerate(
 
         """ # noqa: E501
         if self.streaming:
-            stream_iter = self._astream(messages, stop=stop, run_manager=run_manager, **kwargs)
+            stream_iter = self._astream(
+                messages, stop=stop, run_manager=run_manager, **kwargs
+            )
             return await agenerate_from_stream(stream_iter)
 
         requests_kwargs = kwargs.pop("requests_kwargs", {})
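`_agenerate` mirrors `_generate` on the async path; a hedged sketch of driving it through the public `ainvoke` (endpoint is a placeholder):

```python
import asyncio

from langchain_oci.chat_models.oci_data_science import ChatOCIModelDeployment


async def main() -> None:
    chat = ChatOCIModelDeployment(endpoint="https://<MD_OCID>/predict")  # hypothetical
    message = await chat.ainvoke("Tell me a joke.")
    print(message.content)


asyncio.run(main())
```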
@@ -571,14 +595,19 @@ def with_structured_output(
                 else JsonOutputParser()
             )
         else:
-            raise ValueError(f"Unrecognized method argument. Expected `json_mode`.Received: `{method}`.")
+            raise ValueError(
+                f"Unrecognized method argument. Expected `json_mode`."
+                f"Received: `{method}`."
+            )
 
         if include_raw:
             parser_assign = RunnablePassthrough.assign(
                 parsed=itemgetter("raw") | output_parser, parsing_error=lambda _: None
             )
             parser_none = RunnablePassthrough.assign(parsed=lambda _: None)
-            parser_with_fallback = parser_assign.with_fallbacks([parser_none], exception_key="parsing_error")
+            parser_with_fallback = parser_assign.with_fallbacks(
+                [parser_none], exception_key="parsing_error"
+            )
             return RunnableMap(raw=llm) | parser_with_fallback
         else:
             return llm | output_parser
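The fallback above keeps `include_raw=True` results well-formed even when parsing fails: the runnable map always returns `raw`, `parsed`, and `parsing_error` keys. A hedged usage sketch (endpoint is a placeholder):

```python
from pydantic import BaseModel

from langchain_oci.chat_models.oci_data_science import ChatOCIModelDeployment


class Joke(BaseModel):
    setup: str
    punchline: str


chat = ChatOCIModelDeployment(endpoint="https://<MD_OCID>/predict")  # hypothetical
structured_llm = chat.with_structured_output(
    Joke, method="json_mode", include_raw=True
)
result = structured_llm.invoke("Tell me a joke about cats in JSON.")
# result is a dict with "raw" (AIMessage), "parsed" (Joke or None), "parsing_error".
if result["parsing_error"] is None:
    print(result["parsed"])
```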
@@ -661,7 +690,9 @@ def _process_stream_response(
             if not isinstance(choice, dict):
                 raise TypeError("Endpoint response is not well formed.")
         except (KeyError, IndexError, TypeError) as e:
-            raise ValueError("Error while formatting response payload for chat model of type") from e
+            raise ValueError(
+                "Error while formatting response payload for chat model of type"
+            ) from e
 
         chunk = _convert_delta_to_message_chunk(choice["delta"], default_chunk_cls)
         default_chunk_cls = chunk.__class__
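`raise ... from e` keeps the original failure attached as `__cause__`; a minimal runnable sketch of the same defensive-parsing pattern:

```python
def first_choice(payload: dict) -> dict:
    try:
        choice = payload["choices"][0]
        if not isinstance(choice, dict):
            raise TypeError("Endpoint response is not well formed.")
        return choice
    except (KeyError, IndexError, TypeError) as e:
        # `from e` preserves the original error as __cause__ for debugging.
        raise ValueError("Error while formatting response payload") from e


try:
    first_choice({})
except ValueError as err:
    print(err, "| caused by:", repr(err.__cause__))
```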
@@ -673,7 +704,9 @@ def _process_stream_response(
         if usage is not None:
             gen_info.update({"usage": usage})
 
-        return ChatGenerationChunk(message=chunk, generation_info=gen_info if gen_info else None)
+        return ChatGenerationChunk(
+            message=chunk, generation_info=gen_info if gen_info else None
+        )
 
     def _process_response(self, response_json: dict) -> ChatResult:
         """Formats response in OpenAI spec.
@@ -698,7 +731,9 @@ def _process_response(self, response_json: dict) -> ChatResult:
             if not isinstance(choices, list):
                 raise TypeError("Endpoint response is not well formed.")
         except (KeyError, TypeError) as e:
-            raise ValueError("Error while formatting response payload for chat model of type") from e
+            raise ValueError(
+                "Error while formatting response payload for chat model of type"
+            ) from e
 
         for choice in choices:
             message = _convert_dict_to_message(choice["message"])
