Skip to content

Commit fafad3a

Browse files
committed
[typo] fix typo of VLLM_ASCEND_ENABLE_TOPK_OPTIMIZE
Signed-off-by: linfeng-yuan <1102311262@qq.com>
1 parent 3640c60 commit fafad3a

File tree

4 files changed

+5
-5
lines changed

4 files changed

+5
-5
lines changed

tests/multicard/test_offline_inference_distributed.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -61,7 +61,7 @@ def test_models_distributed_DeepSeek():
6161
vllm_model.generate_greedy(example_prompts, max_tokens)
6262

6363

64-
@patch.dict(os.environ, {"VLLM_ASCEND_ENABLE_TOPK_OPTIMZE": "1"})
64+
@patch.dict(os.environ, {"VLLM_ASCEND_ENABLE_TOPK_OPTIMIZE": "1"})
6565
def test_models_distributed_topk() -> None:
6666
example_prompts = [
6767
"vLLM is a high-throughput and memory-efficient inference and serving engine for LLMs.",

tests/singlecard/test_offline_inference.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -85,7 +85,7 @@ def test_multimodal(model, prompt_template, vllm_runner):
8585
max_tokens=64)
8686

8787

88-
@patch.dict(os.environ, {"VLLM_ASCEND_ENABLE_TOPK_OPTIMZE": "1"})
88+
@patch.dict(os.environ, {"VLLM_ASCEND_ENABLE_TOPK_OPTIMIZE": "1"})
8989
def test_models_topk() -> None:
9090
example_prompts = [
9191
"Hello, my name is",

vllm_ascend/envs.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -57,8 +57,8 @@
5757
lambda: bool(int(os.getenv("VLLM_ENABLE_MC2", '0'))),
5858
# Whether to enable the topk optimization. It's disabled by default for experimental support
5959
# We'll make it enabled by default in the future.
60-
"VLLM_ASCEND_ENABLE_TOPK_OPTIMZE":
61-
lambda: bool(int(os.getenv("VLLM_ASCEND_ENABLE_TOPK_OPTIMZE", '0'))),
60+
"VLLM_ASCEND_ENABLE_TOPK_OPTIMIZE":
61+
lambda: bool(int(os.getenv("VLLM_ASCEND_ENABLE_TOPK_OPTIMIZE", '0'))),
6262
# Whether to use LCCL communication. If not set, the default value is False.
6363
"USING_LCCL_COM":
6464
lambda: bool(int(os.getenv("USING_LCCL_COM", '0'))),

vllm_ascend/patch/worker/patch_common/patch_sampler.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -97,5 +97,5 @@ def topk_topp_forward_native(
9797

9898

9999
Sampler.apply_min_p = apply_min_p
100-
if envs.VLLM_ASCEND_ENABLE_TOPK_OPTIMZE:
100+
if envs.VLLM_ASCEND_ENABLE_TOPK_OPTIMIZE:
101101
TopKTopPSampler.forward_native = topk_topp_forward_native

0 commit comments

Comments (0)