From 5268c2e4f394e82d3d6fdf776eb1d21f2478e38f Mon Sep 17 00:00:00 2001
From: Anthony Casagrande
Date: Thu, 6 Nov 2025 12:23:50 -0800
Subject: [PATCH] test: add tests for image batch size

---
 .../endpoints/test_openai_chat_completions.py | 50 +++++++++++++++++++
 tests/integration/test_multimodal.py          | 49 ++++++++++++++++++
 2 files changed, 99 insertions(+)

diff --git a/tests/endpoints/test_openai_chat_completions.py b/tests/endpoints/test_openai_chat_completions.py
index e383fd60b..93b6265e1 100644
--- a/tests/endpoints/test_openai_chat_completions.py
+++ b/tests/endpoints/test_openai_chat_completions.py
@@ -191,3 +191,53 @@ def test_create_messages_audio_format_error(
         turns = [turn]
         with pytest.raises(ValueError):
             endpoint._create_messages(turns)
+
+    def test_format_payload_with_multiple_images_batch(
+        self, model_endpoint, sample_conversations
+    ):
+        """Test that image batch_size > 1 creates multiple image_url entries."""
+        endpoint = ChatEndpoint(model_endpoint)
+        turn = sample_conversations["session_1"].turns[0]
+        # Simulate batch_size=3 by adding 3 images to contents
+        turn.images = [
+            type(
+                "Image",
+                (),
+                {
+                    "contents": [
+                        "http://image.url/img1.png",
+                        "http://image.url/img2.png",
+                        "http://image.url/img3.png",
+                    ]
+                },
+            )()
+        ]
+        turns = [turn]
+        request_info = RequestInfo(model_endpoint=model_endpoint, turns=turns)
+        payload = endpoint.format_payload(request_info)
+
+        expected_payload = {
+            "messages": [
+                {
+                    "role": turn.role or "user",
+                    "content": [
+                        {"type": "text", "text": "Hello, world!"},
+                        {
+                            "type": "image_url",
+                            "image_url": {"url": "http://image.url/img1.png"},
+                        },
+                        {
+                            "type": "image_url",
+                            "image_url": {"url": "http://image.url/img2.png"},
+                        },
+                        {
+                            "type": "image_url",
+                            "image_url": {"url": "http://image.url/img3.png"},
+                        },
+                    ],
+                }
+            ],
+            "model": "test-model",
+            "stream": False,
+        }
+        assert payload == expected_payload
diff --git a/tests/integration/test_multimodal.py b/tests/integration/test_multimodal.py
index 5f725cc75..60da9f6a0 100644
--- a/tests/integration/test_multimodal.py
+++ b/tests/integration/test_multimodal.py
@@ -73,3 +73,52 @@ async def test_images_and_audio(
         assert result.request_count == defaults.request_count
         assert result.has_input_images
         assert result.has_input_audio
+
+    async def test_image_batch_size(
+        self, cli: AIPerfCLI, aiperf_mock_server: AIPerfMockServer
+    ):
+        """Test that --batch-size-image produces correct number of images per turn."""
+        batch_size = 3
+        result = await cli.run(
+            f"""
+            aiperf profile \
+                --model {defaults.model} \
+                --url {aiperf_mock_server.url} \
+                --endpoint-type chat \
+                --request-count {defaults.request_count} \
+                --concurrency {defaults.concurrency} \
+                --image-width-mean 64 \
+                --image-height-mean 64 \
+                --batch-size-image {batch_size} \
+                --workers-max {defaults.workers_max} \
+                --ui {defaults.ui}
+            """
+        )
+        assert result.request_count == defaults.request_count
+        assert result.has_input_images
+
+        # Verify inputs.json contains the correct number of images per turn
+        assert result.inputs is not None, "inputs.json should exist"
+        assert result.inputs.data, "inputs.json should contain data"
+
+        for session in result.inputs.data:
+            assert session.payloads, "session should have payloads"
+            for payload in session.payloads:
+                # Check OpenAI message format
+                messages = payload.get("messages", [])
+                assert messages, "payload should have messages"
+
+                for message in messages:
+                    content = message.get("content", [])
+                    if isinstance(content, list):
+                        # Count image_url entries in the content array
+                        image_count = sum(
+                            1
+                            for item in content
+                            if isinstance(item, dict)
+                            and item.get("type") == "image_url"
+                        )
+                        # Each turn should have exactly batch_size images
+                        assert image_count == batch_size, (
+                            f"Expected {batch_size} images per turn, got {image_count}"
+                        )