
Commit 20af2b5

dragondream-chendsxsteven authored and committed
fix pre-commit
Signed-off-by: chenmenglong <[email protected]>
1 parent 25e6762 · commit 20af2b5


2 files changed: 5 additions (+), 5 deletions (-)


vllm/model_executor/layers/fused_moe/layer.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -2061,7 +2061,7 @@ def select_experts(
         # ExpertTokensMetadata.expert_num_tokens for better performance.
         # For other implementations or when metadata is not available,
         # we fall back to here.
-
+
         # There is no expert_num_tokens in
         # expert_tokens_meta of DeepEPHTPrepareAndFinalize
         # so it is not supported DeepEPHTPrepareAndFinalize for now.
```

The changed pair at line 2064 is blank on both sides in the rendered page, so this hunk is a whitespace-only fix to a blank line, consistent with the "fix pre-commit" commit message.
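The comment in this hunk describes a fallback: when the prepare/finalize implementation does not expose `ExpertTokensMetadata.expert_num_tokens` (as with `DeepEPHTPrepareAndFinalize`), per-expert token counts are recomputed from the routing ids. Below is a minimal sketch of that kind of counting, using toy shapes and borrowing the `topk_ids` / `expert_load_view` names from the surrounding diff context; it is an assumption-laden illustration, not vLLM's actual implementation:

```python
import torch

# Assumed shapes (not vLLM's exact signatures):
#   topk_ids: (num_tokens, top_k) tensor of global expert ids per token
#   expert_load_view: (num_experts,) running counter of tokens per expert
num_experts, num_tokens, top_k = 8, 4, 2
topk_ids = torch.randint(0, num_experts, (num_tokens, top_k))
expert_load_view = torch.zeros(num_experts, dtype=torch.long)

# Fallback counting: each (token, expert) routing pair adds 1 to that
# expert's slot, mirroring the expert_load_view.scatter_add_(dim=0, ...)
# call visible in the modular_kernel.py diff below.
flat = topk_ids.flatten()
expert_load_view.scatter_add_(dim=0, index=flat,
                              src=torch.ones_like(flat))
print(expert_load_view)  # per-expert token counts
```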

vllm/model_executor/layers/fused_moe/modular_kernel.py

Lines changed: 4 additions & 4 deletions
```diff
@@ -1257,10 +1257,10 @@ def forward(
                           as_tuple=False).squeeze()
         else:
             if not torch.equal(self.expert_map, expert_map):
-            self.expert_map = expert_map.clone()
-            self.local_to_global_physical_experts = \
-                torch.nonzero(expert_map != -1,
-                              as_tuple=False).squeeze()
+                self.expert_map = expert_map.clone()
+                self.local_to_global_physical_experts = \
+                    torch.nonzero(expert_map != -1,
+                                  as_tuple=False).squeeze()
         # Use pre-computed expert token counts from metadata
         expert_load_view.scatter_add_(
             dim=0,
```

The removed and added lines carry identical text, so this hunk is indentation-only: the body of the `if not torch.equal(...)` guard is re-indented one level deeper, which is the kind of fix a pre-commit formatter demands. The exact original whitespace is not recoverable from the rendered page, so the indent levels shown here are a reconstruction.
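Though the hunk itself is whitespace-only, the guarded logic is worth a gloss: `expert_map` appears to map global physical expert ids to local expert ids, with -1 marking experts hosted on other ranks, so `torch.nonzero(expert_map != -1).squeeze()` recovers the global ids of the locally hosted experts. A toy illustration under that assumption (values are made up):

```python
import torch

# Toy expert_map: index = global physical expert id, value = local expert
# id, -1 = expert placed on another rank (convention inferred from the diff).
expert_map = torch.tensor([-1, 0, -1, 1, 2, -1])

# Global ids of the experts hosted on this rank, in local-id order.
local_to_global_physical_experts = torch.nonzero(expert_map != -1,
                                                 as_tuple=False).squeeze()
print(local_to_global_physical_experts)  # tensor([1, 3, 4])
```

The `torch.equal` guard around the update then avoids rebuilding this mapping on every forward pass unless `expert_map` has actually changed.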
