Skip to content

Commit 4df238b

Browse files
committed
Fix isort, yapf, and ruff formatting issues
1 parent 286f414 commit 4df238b

File tree

7 files changed

+9
-10
lines changed

7 files changed

+9
-10
lines changed

examples/offline_inference.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,11 @@
11
# SPDX-License-Identifier: Apache-2.0
22
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
33

4-
import tpu_inference.envs as envs
54
import vllm.envs as vllm_envs
65
from vllm import LLM, EngineArgs
76
from vllm.utils.argparse_utils import FlexibleArgumentParser
87

8+
import tpu_inference.envs as envs
99
from tpu_inference.core import disagg_utils
1010

1111

examples/offline_lora_inference.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,12 +3,13 @@
33

44
import time
55

6-
import tpu_inference.envs as envs
76
import vllm.envs as vllm_envs
87
from vllm import LLM, EngineArgs
98
from vllm.lora.request import LoRARequest
109
from vllm.utils.argparse_utils import FlexibleArgumentParser
1110

11+
import tpu_inference.envs as envs
12+
1213

1314
def create_parser():
1415
parser = FlexibleArgumentParser()

examples/offline_safety_model_inference.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,11 +18,11 @@
1818
--max-num_batched_tokens=4096
1919
"""
2020

21-
import tpu_inference.envs as envs
2221
import vllm.envs as vllm_envs
2322
from vllm import LLM, EngineArgs
2423
from vllm.utils.argparse_utils import FlexibleArgumentParser
2524

25+
import tpu_inference.envs as envs
2626
from tpu_inference.core import disagg_utils
2727

2828

tests/runner/test_tpu_runner_mesh.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,4 @@
11
"""Unit tests for TPUModelRunner mesh initialization."""
2-
import os
32
from unittest.mock import Mock, patch
43

54
import pytest

tpu_inference/executors/ray_distributed_executor.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -74,7 +74,8 @@ def _init_executor(self) -> None:
7474
self.forward_dag: Optional[ray.dag.CompiledDAG] = None
7575

7676
# Ensure Ray compiled DAG channel type is set for vLLM
77-
os.environ["VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE"] = tpu_envs.VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE
77+
os.environ[
78+
"VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE"] = tpu_envs.VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE
7879

7980
# Currently, this requires USE_RAY_SPMD_WORKER=True.
8081
self.use_ray_compiled_dag = True
@@ -91,7 +92,8 @@ def _init_executor(self) -> None:
9192
# Ensure Ray usage stats collection setting is propagated to Ray workers.
9293
# Ray workers inherit environment variables, so we explicitly set this
9394
# based on our configuration (defaults to "0" to disable stats).
94-
os.environ["RAY_USAGE_STATS_ENABLED"] = tpu_envs.RAY_USAGE_STATS_ENABLED
95+
os.environ[
96+
"RAY_USAGE_STATS_ENABLED"] = tpu_envs.RAY_USAGE_STATS_ENABLED
9597

9698
# Create the parallel GPU workers.
9799
self._init_workers_ray(placement_group)

tpu_inference/layers/common/sharding.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,5 @@
11
import json
22
import math
3-
import os
43
from dataclasses import asdict, dataclass
54
from typing import TYPE_CHECKING, List, Optional
65

tpu_inference/runner/tpu_runner.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,5 @@
11
import copy
22
import functools
3-
import os
43
import random
54
from contextlib import nullcontext
65
from dataclasses import dataclass
@@ -13,8 +12,6 @@
1312
import torch
1413
import vllm.envs as vllm_envs
1514
from flax import nnx
16-
17-
import tpu_inference.envs as envs
1815
from jax.experimental import mesh_utils
1916
from jax.sharding import NamedSharding, PartitionSpec
2017
from torchax.ops.mappings import j2t_dtype
@@ -37,6 +34,7 @@
3734
KVConnectorModelRunnerMixin
3835
from vllm.v1.worker.lora_model_runner_mixin import LoRAModelRunnerMixin
3936

37+
import tpu_inference.envs as envs
4038
from tpu_inference import utils as common_utils
4139
from tpu_inference.layers.common.attention_metadata import AttentionMetadata
4240
from tpu_inference.layers.common.sharding import (MESH_AXIS_NAMES,

0 commit comments

Comments (0)