From a55a05fed1afdeb634e9814e26d885ffeed413c4 Mon Sep 17 00:00:00 2001
From: BowTen
Date: Sat, 29 Nov 2025 23:38:20 +0800
Subject: [PATCH] [Doc]: Fix typo in fused_moe layer

Signed-off-by: BowTen
---
 vllm/model_executor/layers/fused_moe/layer.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/vllm/model_executor/layers/fused_moe/layer.py b/vllm/model_executor/layers/fused_moe/layer.py
index 0ef3130b2633..e180b4f4ba23 100644
--- a/vllm/model_executor/layers/fused_moe/layer.py
+++ b/vllm/model_executor/layers/fused_moe/layer.py
@@ -1422,7 +1422,7 @@ def _maybe_make_contiguous(
         # do nothing.
         return p
 
-    # Do not update the layer paramater as the layer's MoE operations would
+    # Do not update the layer parameter as the layer's MoE operations would
     # expect the parameter's tensor to the same shape / stride. Instead,
     # make a new torch.nn.Parameter that is used just in the context of
     # EPLB.