2 files changed (+5, -5 lines) under vllm/model_executor/layers/fused_moe.

```diff
@@ -2061,7 +2061,7 @@ def select_experts(
     # ExpertTokensMetadata.expert_num_tokens for better performance.
     # For other implementations or when metadata is not available,
     # we fall back to here.
-
+
     # There is no expert_num_tokens in
     # expert_tokens_meta of DeepEPHTPrepareAndFinalize,
     # so DeepEPHTPrepareAndFinalize is not supported for now.
```
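The comments in this hunk describe a two-path design: implementations that expose `ExpertTokensMetadata.expert_num_tokens` let the caller reuse those per-expert counts directly, while implementations without that metadata (including `DeepEPHTPrepareAndFinalize`, whose `expert_tokens_meta` carries no `expert_num_tokens`) fall back to counting here. Below is a minimal sketch of that pattern only; the names `expert_token_counts`, `topk_ids`, and `num_experts` are illustrative assumptions, not the actual vLLM signature.

```python
import torch


def expert_token_counts(topk_ids: torch.Tensor,
                        num_experts: int,
                        expert_tokens_meta=None) -> torch.Tensor:
    """Per-expert routed-token counts, preferring metadata when present."""
    # Fast path: some prepare/finalize implementations already carry the
    # per-expert token counts in their metadata.
    counts = getattr(expert_tokens_meta, "expert_num_tokens", None)
    if counts is not None:
        return counts
    # Fallback: count how many routed token slots landed on each expert.
    # A metadata object without expert_num_tokens (e.g. the
    # DeepEPHTPrepareAndFinalize case described above) always ends up here.
    return torch.bincount(topk_ids.flatten(), minlength=num_experts)


topk_ids = torch.tensor([[0, 2], [1, 2], [2, 2]])     # (num_tokens, top_k)
print(expert_token_counts(topk_ids, num_experts=4))   # tensor([1, 1, 4, 0])
```

Reusing the metadata counts avoids recomputing this histogram over `topk_ids` on every call, which is the "better performance" the first comment refers to.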
```diff
@@ -1257,10 +1257,10 @@ def forward(
                     as_tuple=False).squeeze()
         else:
             if not torch.equal(self.expert_map, expert_map):
-              self.expert_map = expert_map.clone()
-              self.local_to_global_physical_experts = \
-                  torch.nonzero(expert_map != -1,
-                                as_tuple=False).squeeze()
+                self.expert_map = expert_map.clone()
+                self.local_to_global_physical_experts = \
+                    torch.nonzero(expert_map != -1,
+                                  as_tuple=False).squeeze()
         # Use pre-computed expert token counts from metadata
         expert_load_view.scatter_add_(
             dim=0,
```
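The removed and added lines above carry identical text, so this hunk adjusts only indentation. The surrounding logic refreshes the cached `self.expert_map` and the `local_to_global_physical_experts` index only when the incoming `expert_map` actually differs from the cached one, then accumulates the pre-computed per-expert token counts into `expert_load_view` via `scatter_add_`. The following is a standalone sketch of that caching-and-accumulation pattern, with a hypothetical `ExpertLoadTracker` wrapper and assumed 1-D shapes; it is not the actual `forward()` code.

```python
import torch


class ExpertLoadTracker:
    """Sketch of the expert_load_view update pattern around this hunk."""

    def __init__(self) -> None:
        self.expert_map: torch.Tensor | None = None
        self.local_to_global_physical_experts: torch.Tensor | None = None

    def update(self, expert_map: torch.Tensor,
               expert_num_tokens: torch.Tensor,
               expert_load_view: torch.Tensor) -> None:
        # Recompute the local -> global physical-expert mapping only when
        # the routing map actually changed; running torch.nonzero on every
        # step would be wasted work.
        if self.expert_map is None or not torch.equal(self.expert_map,
                                                      expert_map):
            self.expert_map = expert_map.clone()
            self.local_to_global_physical_experts = torch.nonzero(
                expert_map != -1, as_tuple=False).squeeze()
        # Accumulate the pre-computed per-local-expert token counts into
        # the global (physical-expert indexed) load view.
        expert_load_view.scatter_add_(
            dim=0,
            index=self.local_to_global_physical_experts,
            src=expert_num_tokens)


tracker = ExpertLoadTracker()
expert_map = torch.tensor([-1, 0, 1, -1])     # global -> local (-1 = not on this rank)
load_view = torch.zeros(4, dtype=torch.long)  # tokens seen per global expert
tracker.update(expert_map, torch.tensor([5, 3]), load_view)
print(load_view)                              # tensor([0, 5, 3, 0])
```

Cloning the map before caching it keeps the comparison in later `torch.equal` checks against a stable snapshot rather than a tensor the caller may mutate.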