
Commit e0edcde

offline0806 committed

[EPLB] Fix ci.

Signed-off-by: offline0806 <z00858301@china.huawei.com>

1 parent 01545c1 commit e0edcde

2 files changed: +6 −5 lines changed


vllm_ascend/ascend_config.py

Lines changed: 1 addition & 1 deletion
@@ -49,7 +49,7 @@ def __init__(self, vllm_config):
             "expert_map_record_path",
             None)  # Provide path to export expert map
         self.init_redundancy_expert = additional_config.get(
-            "init_redundancy_expert",0)
+            "init_redundancy_expert", 0)
         self.dynamic_eplb = additional_config.get("dynamic_eplb", False)
         self.num_iterations_eplb_update = additional_config.get(
             "num_iterations_eplb_update", 400)

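For context, the options touched above are read from vLLM's additional_config. A minimal sketch of how they might be supplied when constructing an engine on Ascend, assuming the LLM(..., additional_config=...) entry point; the model name and values are illustrative, not part of this commit:

    from vllm import LLM

    llm = LLM(
        model="some/MoE-model",  # hypothetical model name
        additional_config={
            "dynamic_eplb": True,               # enable dynamic expert load balancing
            "init_redundancy_expert": 0,        # redundant experts at init (default 0)
            "num_iterations_eplb_update": 400,  # iterations between EPLB rebalances
            "expert_map_record_path": None,     # optional path to export the expert map
        },
    )
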
vllm_ascend/torchair/ops/torchair_fused_moe.py

Lines changed: 5 additions & 4 deletions
@@ -37,12 +37,12 @@
     FusedMoE, UnquantizedFusedMoEMethod, determine_expert_map)
 from vllm.model_executor.layers.quantization.base_config import \
     QuantizationConfig
-from vllm_ascend.eplb.core.eplb_utils import (
-    determine_default_expert_map,
-    determine_default_log2phy_map)
+
 from vllm_ascend.ascend_config import get_ascend_config
 from vllm_ascend.ascend_forward_context import FusedMoEState
 from vllm_ascend.distributed.parallel_state import get_mc2_group
+from vllm_ascend.eplb.core.eplb_utils import (determine_default_expert_map,
+                                              determine_default_log2phy_map)
 from vllm_ascend.ops.expert_load_balancer import ExpertLoadBalancer
 from vllm_ascend.ops.sequence_parallel import MetadataForPadding
 from vllm_ascend.quantization.quant_config import AscendFusedMoEMethod
@@ -1080,7 +1080,8 @@ def __init__(
         assert self.quant_method is not None

         self.moe_load = None
-        local_num_experts = (torch.sum(self.expert_map != -1) if self.expert_map is not None else num_experts)
+        local_num_experts = (torch.sum(self.expert_map != -1)
+                             if self.expert_map is not None else num_experts)
         if self.dynamic_eplb:
             self.moe_load = torch.zeros(local_num_experts, dtype=torch.int64)
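
For context, the rewrapped expression counts how many experts are local to the current rank: entries of expert_map equal to -1 mark global experts not hosted here. A minimal sketch of the counting logic with an illustrative expert_map (the tensor values are made up; only the expression mirrors the diff):

    import torch

    # Example map for one rank over 8 global experts: experts 2, 5 and 7 are
    # local (mapped to local slots 0, 1, 2); every other entry is -1.
    expert_map = torch.tensor([-1, -1, 0, -1, -1, 1, -1, 2])
    num_experts = 8  # global expert count, used when no map exists

    # Same shape as the expression in the diff: count non-(-1) entries when a
    # map is present, otherwise fall back to the global count.
    local_num_experts = (torch.sum(expert_map != -1)
                         if expert_map is not None else num_experts)
    print(int(local_num_experts))  # -> 3

    # With dynamic EPLB enabled, a per-local-expert load counter of that size
    # is allocated, as in the diff.
    moe_load = torch.zeros(local_num_experts, dtype=torch.int64)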