
Commit cc28616

Merge branch 'main' into dev
2 parents 4489b6a + 11fd69d

9 files changed: +44 -24 lines changed

.buildkite/test-pipeline.yaml

Lines changed: 3 additions & 0 deletions
@@ -546,8 +546,11 @@ steps:
 
 - label: Model Executor Test # 23min
   timeout_in_minutes: 35
+  torch_nightly: true
   mirror_hardwares: [amdexperimental]
   source_file_dependencies:
+  - vllm/engine/arg_utils.py
+  - vllm/config/model.py
   - vllm/model_executor
   - tests/model_executor
   - tests/entrypoints/openai/test_tensorizer_entrypoint.py

tests/entrypoints/openai/test_translation_validation.py

Lines changed: 1 addition & 1 deletion
@@ -79,7 +79,7 @@ async def test_basic_audio_with_lora(mary_had_lamb):
         temperature=0.0,
     )
     out = json.loads(translation)["text"].strip().lower()
-    assert "mary tenía un pequeño cordero" in out
+    assert "pequeño" in out.split(" ")


 # NOTE: (NickLucche) the large-v3-turbo model was not trained on translation!

tests/model_executor/model_loader/runai_model_streamer/test_runai_model_streamer_loader.py

Lines changed: 17 additions & 0 deletions
@@ -1,12 +1,16 @@
 # SPDX-License-Identifier: Apache-2.0
 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
 
+import pytest
+
 from vllm import SamplingParams
 from vllm.config.load import LoadConfig
 from vllm.model_executor.model_loader import get_model_loader
 
 load_format = "runai_streamer"
 test_model = "openai-community/gpt2"
+# TODO(amacaskill): Replace with a GKE owned GCS bucket.
+test_gcs_model = "gs://vertex-model-garden-public-us/codegemma/codegemma-2b/"
 
 prompts = [
     "Hello, my name is",
@@ -32,3 +36,16 @@ def test_runai_model_loader_download_files(vllm_runner):
     with vllm_runner(test_model, load_format=load_format) as llm:
         deserialized_outputs = llm.generate(prompts, sampling_params)
         assert deserialized_outputs
+
+
+def test_runai_model_loader_download_files_gcs(
+    vllm_runner, monkeypatch: pytest.MonkeyPatch
+):
+    monkeypatch.setenv("GOOGLE_CLOUD_PROJECT", "fake-project")
+    monkeypatch.setenv("RUNAI_STREAMER_GCS_USE_ANONYMOUS_CREDENTIALS", "true")
+    monkeypatch.setenv(
+        "CLOUD_STORAGE_EMULATOR_ENDPOINT", "https://storage.googleapis.com"
+    )
+    with vllm_runner(test_gcs_model, load_format=load_format) as llm:
+        deserialized_outputs = llm.generate(prompts, sampling_params)
+        assert deserialized_outputs
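For context, the new test exercises the same load path a user would hit when pointing vLLM directly at a gs:// model. A minimal sketch of that usage, assuming the environment variables monkeypatched above have the same effect outside the test harness (the bucket path is the one from the test; everything else is illustrative, not part of this diff):

import os

from vllm import LLM, SamplingParams

# Anonymous access to a public GCS bucket, mirroring the test's env setup.
os.environ["GOOGLE_CLOUD_PROJECT"] = "fake-project"
os.environ["RUNAI_STREAMER_GCS_USE_ANONYMOUS_CREDENTIALS"] = "true"
os.environ["CLOUD_STORAGE_EMULATOR_ENDPOINT"] = "https://storage.googleapis.com"

llm = LLM(
    model="gs://vertex-model-garden-public-us/codegemma/codegemma-2b/",
    load_format="runai_streamer",
)
outputs = llm.generate(
    ["Hello, my name is"], SamplingParams(temperature=0.0, max_tokens=16)
)
print(outputs[0].outputs[0].text)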

vllm/config/compilation.py

Lines changed: 3 additions & 7 deletions
@@ -251,13 +251,6 @@ class CompilationConfig:
     disabled when running with Inductor: mode>=VLLM_COMPILE and use_inductor=True.
     Inductor generates (fused) Triton kernels for disabled custom ops."""
     splitting_ops: list[str] | None = None
-
-    """
-    Provide control over whether to compile the multimodal encoder
-    such as Qwen2_5_vl
-    """
-    compile_mm_encoder: bool = True
-
     """A list of ops to exclude from cudagraphs, used in piecewise compilation.
 
     The behavior depends on use_inductor_graph_partition:
@@ -275,6 +268,9 @@ class CompilationConfig:
 
     If None, defaults to attention ops for piecewise cudagraphs.
     If empty list [], no ops are excluded (suitable for full cudagraphs)."""
+    compile_mm_encoder: bool = True
+    """Whether or not to compile the multimodal encoder.
+    Currently, this only works for `Qwen2_5_vl`."""
 
     # Inductor capture
     use_inductor: bool | None = None
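The field keeps its default of True; it only moves so that `splitting_ops` and its docstring sit next to each other. A minimal sketch of toggling it from user code, assuming `compilation_config` accepts a plain dict the way it does for other fields (the model name is illustrative):

from vllm import LLM

# Opt out of compiling the multimodal (vision) encoder;
# text-side compilation behaviour is unchanged.
llm = LLM(
    model="Qwen/Qwen2.5-VL-3B-Instruct",
    compilation_config={"compile_mm_encoder": False},
)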

vllm/model_executor/layers/quantization/mxfp4.py

Lines changed: 4 additions & 2 deletions
@@ -43,6 +43,7 @@
 from vllm.model_executor.layers.quantization.utils.mxfp4_utils import (
     _can_support_mxfp4,
     _swizzle_mxfp4,
+    get_padding_alignment,
 )
 from vllm.model_executor.layers.quantization.utils.quant_utils import is_layer_skipped
 from vllm.model_executor.utils import set_weight_attrs
@@ -282,10 +283,11 @@ def create_weights(
             )
             hidden_size = round_up(hidden_size, 128)
         elif current_platform.is_rocm():
+            pad_align = get_padding_alignment()
             intermediate_size_per_partition_after_pad = round_up(
-                intermediate_size_per_partition, 256
+                intermediate_size_per_partition, pad_align
             )
-            hidden_size = round_up(hidden_size, 256)
+            hidden_size = round_up(hidden_size, pad_align)
         else:
             intermediate_size_per_partition_after_pad = round_up(
                 intermediate_size_per_partition, 64

vllm/model_executor/layers/quantization/utils/mxfp4_utils.py

Lines changed: 9 additions & 0 deletions
@@ -7,6 +7,7 @@
 
 from vllm.logger import init_logger
 from vllm.platforms import current_platform
+from vllm.triton_utils import triton
 from vllm.utils.torch_utils import direct_register_custom_op, is_torch_equal_or_newer
 
 logger = init_logger(__name__)
@@ -99,6 +100,14 @@ def _can_support_mxfp4(
     )


+def get_padding_alignment():
+    return (
+        256
+        if triton.runtime.driver.active.get_current_target().arch in ("gfx950",)
+        else 128
+    )
+
+
 def _dequant_mxfp4(
     x: torch.Tensor, scale: torch.Tensor, float_dtype: torch.dtype
 ) -> torch.Tensor:
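To see what the relaxed alignment buys on non-gfx950 ROCm targets, here is a standalone sketch of the padding arithmetic with round_up re-implemented locally (the intermediate size 2880 is only an illustrative value, not taken from this diff):

def round_up(x: int, n: int) -> int:
    # Round x up to the nearest multiple of n, as in create_weights above.
    return ((x + n - 1) // n) * n

# gfx950 keeps the 256-element alignment; other ROCm targets now pad to 128.
print(round_up(2880, 256))  # 3072 -> 192 padded elements
print(round_up(2880, 128))  # 2944 -> 64 padded elements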

vllm/model_executor/models/qwen2_5_vl.py

Lines changed: 1 addition & 3 deletions
@@ -67,9 +67,7 @@
 from vllm.model_executor.layers.quantization import QuantizationConfig
 from vllm.model_executor.model_loader.weight_utils import default_weight_loader
 from vllm.model_executor.models.module_mapping import MultiModelKeys
-from vllm.model_executor.models.transformers.utils import (
-    should_torch_compile_mm_vit,
-)
+from vllm.model_executor.models.vision import should_torch_compile_mm_vit
 from vllm.multimodal import MULTIMODAL_REGISTRY
 from vllm.multimodal.evs import (
     compute_mrope_for_media,

vllm/model_executor/models/transformers/utils.py

Lines changed: 0 additions & 11 deletions
@@ -205,14 +205,3 @@ def can_enable_torch_compile(vllm_config: "VllmConfig") -> bool:
     # Dynamic rope scaling is not compatible with torch.compile
     rope_scaling: dict = getattr(text_config, "rope_scaling", None) or {}
     return rope_scaling.get("rope_type") != "dynamic"
-
-
-def should_torch_compile_mm_vit(vllm_config: "VllmConfig") -> bool:
-    """
-    Callable to be passed to `@support_torch_compile`'s `enable_if` argument.
-
-    Defaults to `True` but is disabled in the following situations:
-
-    - The model uses dynamic rope scaling.
-    """
-    return vllm_config.compilation_config.compile_mm_encoder

vllm/model_executor/models/vision.py

Lines changed: 6 additions & 0 deletions
@@ -11,6 +11,7 @@
 from transformers import PretrainedConfig
 
 from vllm.attention.backends.registry import _Backend
+from vllm.config import VllmConfig
 from vllm.distributed import (
     get_tensor_model_parallel_rank,
     get_tensor_model_parallel_world_size,
@@ -100,6 +101,11 @@ def get_vit_attn_backend(
     return current_platform.get_vit_attn_backend(head_size, dtype)


+def should_torch_compile_mm_vit(vllm_config: VllmConfig) -> bool:
+    """Callable to be passed to `@support_torch_compile`'s `enable_if` argument."""
+    return vllm_config.compilation_config.compile_mm_encoder
+
+
 VisionFeatureSelectStrategyStr = Literal["class", "default", "full"]

 VisionFeatureSelectStrategy: TypeAlias = (
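As its docstring says, the relocated helper is meant to be handed to `@support_torch_compile` via `enable_if`. A conceptual sketch of that wiring, assuming the decorator lives at vllm.compilation.decorators; the toy class below only stands in for the real decorated module (in vLLM it is the Qwen2.5-VL vision transformer, whose constructor receives the vllm_config the callable inspects):

import torch
from torch import nn

from vllm.compilation.decorators import support_torch_compile
from vllm.model_executor.models.vision import should_torch_compile_mm_vit


@support_torch_compile(enable_if=should_torch_compile_mm_vit)
class ToyVisionEncoder(nn.Module):
    # Compiled only when compilation_config.compile_mm_encoder is True.
    def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
        return pixel_values * 2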
