@@ -197,3 +197,134 @@ def tool1(x: int) -> int:
197197 stream = False ,
198198 ** llm_with_tools .kwargs
199199 )
200+
201+
@pytest.mark.requires("oci")
def test_version_filter_llama_3_0_blocked():
    """Test that Llama 3.0 models are blocked from parallel tool calling."""
    oci_gen_ai_client = MagicMock()
    llm = ChatOCIGenAI(
        model_id="meta.llama-3-70b-instruct",
        client=oci_gen_ai_client,
    )

    def tool1(x: int) -> int:
        """Tool 1."""
        return x + 1

    # bind_tools must reject parallel tool calls for pre-3.3 Llama models;
    # the error message is expected to name the supported families.
    # (Fixed regex: the escaped '+' must immediately follow '\\'; a stray
    # space would require a literal backslash plus spaces and never match.)
    with pytest.raises(ValueError, match=r"Llama 3.3\+.*Llama 4\+"):
        llm.bind_tools([tool1], parallel_tool_calls=True)
218+
219+
@pytest.mark.requires("oci")
def test_version_filter_llama_3_1_blocked():
    """Test that Llama 3.1 models are blocked from parallel tool calling."""
    oci_gen_ai_client = MagicMock()
    llm = ChatOCIGenAI(
        model_id="meta.llama-3.1-70b-instruct",
        client=oci_gen_ai_client,
    )

    def tool1(x: int) -> int:
        """Tool 1."""
        return x + 1

    # Llama 3.1 predates parallel-tool-call support; bind_tools must raise.
    # (Fixed regex: removed the stray space after '\\' so '\+' matches a
    # literal '+' in the error message.)
    with pytest.raises(ValueError, match=r"Llama 3.3\+.*Llama 4\+"):
        llm.bind_tools([tool1], parallel_tool_calls=True)
236+
237+
@pytest.mark.requires("oci")
def test_version_filter_llama_3_2_blocked():
    """Test that Llama 3.2 models are blocked from parallel tool calling."""
    oci_gen_ai_client = MagicMock()
    llm = ChatOCIGenAI(
        model_id="meta.llama-3.2-11b-vision-instruct",
        client=oci_gen_ai_client,
    )

    def tool1(x: int) -> int:
        """Tool 1."""
        return x + 1

    # Llama 3.2 (including vision variants) predates parallel-tool-call
    # support; bind_tools must raise.
    # (Fixed regex: '\+' with no embedded space so it matches a literal '+'.)
    with pytest.raises(ValueError, match=r"Llama 3.3\+.*Llama 4\+"):
        llm.bind_tools([tool1], parallel_tool_calls=True)
254+
255+
@pytest.mark.requires("oci")
def test_version_filter_llama_3_3_allowed():
    """Test that Llama 3.3 models are allowed parallel tool calling."""
    mock_client = MagicMock()
    chat_model = ChatOCIGenAI(
        model_id="meta.llama-3.3-70b-instruct",
        client=mock_client,
    )

    def tool1(x: int) -> int:
        """Tool 1."""
        return x + 1

    # Binding with parallel_tool_calls=True must succeed (no ValueError)
    # and the flag must be propagated into the bound runnable's kwargs.
    bound = chat_model.bind_tools([tool1], parallel_tool_calls=True)
    assert bound.kwargs.get("is_parallel_tool_calls") is True
272+
273+
@pytest.mark.requires("oci")
def test_version_filter_llama_4_allowed():
    """Test that Llama 4 models are allowed parallel tool calling."""
    mock_client = MagicMock()
    chat_model = ChatOCIGenAI(
        model_id="meta.llama-4-maverick-17b-128e-instruct-fp8",
        client=mock_client,
    )

    def tool1(x: int) -> int:
        """Tool 1."""
        return x + 1

    # Binding with parallel_tool_calls=True must succeed (no ValueError)
    # and the flag must be propagated into the bound runnable's kwargs.
    bound = chat_model.bind_tools([tool1], parallel_tool_calls=True)
    assert bound.kwargs.get("is_parallel_tool_calls") is True
290+
291+
@pytest.mark.requires("oci")
def test_version_filter_other_models_allowed():
    """Test that other GenericChatRequest models are allowed parallel tool calling."""
    mock_client = MagicMock()

    # Non-Llama generic-chat models (here: xAI Grok) are not subject to the
    # Llama version gate and should bind without raising.
    grok_model = ChatOCIGenAI(
        model_id="xai.grok-4-fast",
        client=mock_client,
    )

    def tool1(x: int) -> int:
        """Tool 1."""
        return x + 1

    bound = grok_model.bind_tools([tool1], parallel_tool_calls=True)
    assert bound.kwargs.get("is_parallel_tool_calls") is True
310+
311+
@pytest.mark.requires("oci")
def test_version_filter_supports_parallel_tool_calls_method():
    """Test the _supports_parallel_tool_calls method directly."""
    llm = ChatOCIGenAI(
        model_id="meta.llama-3.3-70b-instruct",
        client=MagicMock(),
    )

    # Table-driven check: model_id -> whether parallel tool calls are allowed.
    # Llama < 3.3 and Cohere models are blocked; Llama 3.3+/4+, Grok,
    # OpenAI, and Mistral models are allowed.
    expectations = {
        "meta.llama-4-maverick-17b-128e-instruct-fp8": True,
        "meta.llama-3.3-70b-instruct": True,
        "meta.llama-3.2-11b-vision-instruct": False,
        "meta.llama-3.1-70b-instruct": False,
        "meta.llama-3-70b-instruct": False,
        "cohere.command-r-plus": False,
        "xai.grok-4-fast": True,
        "openai.gpt-4": True,
        "mistral.mistral-large": True,
    }
    for model_id, expected in expectations.items():
        assert llm._supports_parallel_tool_calls(model_id) is expected, model_id
0 commit comments