Skip to content

Commit 682963b

Browse files
committed
[CI/UT][Graph] Add ut for torchair graph mode
* use vllm-ascend/DeepSeek-V3 * use spawn * update cleanup dist env and mem * update torchair config * disable eager * enable refresh * use random weight * some fixes Signed-off-by: MengqingCao <cmq0113@163.com>
1 parent 47b507b commit 682963b

File tree

5 files changed

+121
-43
lines changed

5 files changed

+121
-43
lines changed

.github/workflows/vllm_ascend_test.yaml

Lines changed: 33 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -179,23 +179,24 @@ jobs:
179179
VLLM_WORKER_MULTIPROC_METHOD: spawn
180180
run: |
181181
if [[ "${{ matrix.os }}" == "linux-arm64-npu-1" ]]; then
182-
VLLM_USE_MODELSCOPE=True pytest -sv tests/singlecard/test_offline_inference.py
183-
# guided decoding doesn't work, fix it later
184-
# pytest -sv tests/singlecard/test_guided_decoding.py.py
185-
# test_ascend_config.py should be ran separately because it will regenerate the global config many times.
186-
pytest -sv tests/singlecard/test_ascend_config.py
187-
pytest -sv tests/singlecard/test_camem.py
188-
pytest -sv tests/singlecard/core/test_ascend_scheduler.py
189-
pytest -sv tests/singlecard/core/test_ascend_scheduler_e2e.py
190-
pytest -sv tests/singlecard/ \
191-
--ignore=tests/singlecard/test_offline_inference.py \
192-
--ignore=tests/singlecard/test_guided_decoding.py \
193-
--ignore=tests/singlecard/test_ascend_config.py \
194-
--ignore=tests/singlecard/test_camem.py \
195-
--ignore=tests/singlecard/core/test_ascend_scheduler.py \
196-
--ignore=tests/singlecard/core/test_ascend_scheduler_e2e.py
182+
echo "skip single card"
183+
# VLLM_USE_MODELSCOPE=True pytest -sv tests/singlecard/test_offline_inference.py
184+
# # guided decoding doesn't work, fix it later
185+
# # pytest -sv tests/singlecard/test_guided_decoding.py.py
186+
# # test_ascend_config.py should be ran separately because it will regenerate the global config many times.
187+
# pytest -sv tests/singlecard/test_ascend_config.py
188+
# pytest -sv tests/singlecard/test_camem.py
189+
# pytest -sv tests/singlecard/core/test_ascend_scheduler.py
190+
# pytest -sv tests/singlecard/core/test_ascend_scheduler_e2e.py
191+
# pytest -sv tests/singlecard/ \
192+
# --ignore=tests/singlecard/test_offline_inference.py \
193+
# --ignore=tests/singlecard/test_guided_decoding.py \
194+
# --ignore=tests/singlecard/test_ascend_config.py \
195+
# --ignore=tests/singlecard/test_camem.py \
196+
# --ignore=tests/singlecard/core/test_ascend_scheduler.py \
197+
# --ignore=tests/singlecard/core/test_ascend_scheduler_e2e.py
197198
else
198-
pytest -sv tests/multicard/test_ilama_lora_tp2.py
199+
# pytest -sv tests/multicard/test_ilama_lora_tp2.py
199200
# To avoid oom, we need to run the test in a single process.
200201
VLLM_USE_MODELSCOPE=True pytest -sv tests/multicard/test_offline_inference_distributed.py::test_models_distributed_QwQ
201202
VLLM_USE_MODELSCOPE=True pytest -sv tests/multicard/test_offline_inference_distributed.py::test_models_distributed_DeepSeek
@@ -210,21 +211,22 @@ jobs:
210211
VLLM_USE_V1: 0
211212
run: |
212213
if [[ "${{ matrix.os }}" == "linux-arm64-npu-1" ]]; then
213-
VLLM_USE_MODELSCOPE=True pytest -sv tests/singlecard/test_offline_inference.py
214-
# guided decoding doesn't work, fix it later
215-
# pytest -sv tests/singlecard/test_guided_decoding.py.py
216-
pytest -sv tests/singlecard/test_camem.py
217-
# test_ascend_config.py should be ran separately because it will regenerate the global config many times.
218-
pytest -sv tests/singlecard/test_ascend_config.py
219-
pytest -sv tests/singlecard/test_prompt_embedding.py
220-
pytest -sv tests/singlecard/ \
221-
--ignore=tests/singlecard/test_offline_inference.py \
222-
--ignore=tests/singlecard/test_guided_decoding.py \
223-
--ignore=tests/singlecard/test_camem.py \
224-
--ignore=tests/singlecard/test_ascend_config.py \
225-
--ignore=tests/singlecard/test_prompt_embedding.py \
226-
--ignore=tests/singlecard/core/test_ascend_scheduler.py \
227-
--ignore=tests/singlecard/core/test_ascend_scheduler_e2e.py
214+
echo "skip single card"
215+
# VLLM_USE_MODELSCOPE=True pytest -sv tests/singlecard/test_offline_inference.py
216+
# # guided decoding doesn't work, fix it later
217+
# # pytest -sv tests/singlecard/test_guided_decoding.py.py
218+
# pytest -sv tests/singlecard/test_camem.py
219+
# # test_ascend_config.py should be ran separately because it will regenerate the global config many times.
220+
# pytest -sv tests/singlecard/test_ascend_config.py
221+
# pytest -sv tests/singlecard/test_prompt_embedding.py
222+
# pytest -sv tests/singlecard/ \
223+
# --ignore=tests/singlecard/test_offline_inference.py \
224+
# --ignore=tests/singlecard/test_guided_decoding.py \
225+
# --ignore=tests/singlecard/test_camem.py \
226+
# --ignore=tests/singlecard/test_ascend_config.py \
227+
# --ignore=tests/singlecard/test_prompt_embedding.py \
228+
# --ignore=tests/singlecard/core/test_ascend_scheduler.py \
229+
# --ignore=tests/singlecard/core/test_ascend_scheduler_e2e.py
228230
else
229231
pytest -sv tests/multicard/test_ilama_lora_tp2.py
230232
# Fixme: run VLLM_USE_MODELSCOPE=True pytest -sv tests/multicard/test_offline_inference_distributed.py will raise error.

docs/source/user_guide/additional_config.md

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -53,7 +53,7 @@ The details of each config option are as follows:
5353
| ---- | ---- | ------- | ----------- |
5454
| `enabled` | bool | `False` | Whether to enable ascend scheduler for V1 engine|
5555

56-
ascend_scheduler_config also support the options from [vllm scheduler config](https://docs.vllm.ai/en/stable/api/vllm/config.html#vllm.config.SchedulerConfig). For example, you can add `chunked_prefill_enabled: true` to ascend_scheduler_config as well.
56+
ascend_scheduler_config also support the options from [vllm scheduler config](https://docs.vllm.ai/en/stable/api/vllm/config.html#vllm.config.SchedulerConfig). For example, you can add `enable_chunked_prefill: true` to ascend_scheduler_config as well.
5757

5858
### Example
5959

@@ -62,18 +62,18 @@ A full example of additional configuration is as follows:
6262
```
6363
{
6464
"torchair_graph_config": {
65-
"enabled": true,
66-
"use_cached_graph": true,
65+
"enabled": True,
66+
"use_cached_graph": True,
6767
"graph_batch_sizes": [1, 2, 4, 8],
68-
"graph_batch_sizes_init": false,
69-
"enable_multistream_moe": false,
70-
"enable_kv_nz": false
68+
"graph_batch_sizes_init": False,
69+
"enable_multistream_moe": False,
70+
"enable_kv_nz": False
7171
},
7272
"ascend_scheduler_config": {
73-
"enabled": true,
74-
"chunked_prefill_enabled": true,
73+
"enabled": True,
74+
"enable_chunked_prefill": True,
7575
},
7676
"expert_tensor_parallel_size": 1,
77-
"refresh": false,
77+
"refresh": False,
7878
}
7979
```

docs/source/user_guide/graph_mode.md

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -47,14 +47,15 @@ from vllm import LLM
4747

4848
os.environ["VLLM_USE_V1"] = 1
4949

50-
model = LLM(model="deepseek-ai/DeepSeek-R1-0528", additional_config={"torchair_graph_config": {"enabled": True}})
50+
# TorchAirGraph is only work without chunked-prefill now
51+
model = LLM(model="deepseek-ai/DeepSeek-R1-0528", additional_config={"torchair_graph_config": {"enabled": True},"ascend_scheduler_config": {"enabled": True,}})
5152
outputs = model.generate("Hello, how are you?")
5253
```
5354

5455
online example:
5556

5657
```shell
57-
vllm serve Qwen/Qwen2-7B-Instruct --additional-config='{"torchair_graph_config": {"enabled": true}}'
58+
vllm serve Qwen/Qwen2-7B-Instruct --additional-config='{"torchair_graph_config": {"enabled": True},"ascend_scheduler_config": {"enabled": True,}}'
5859
```
5960

6061
You can find more detail about additional config [here](./additional_config.md)

tests/conftest.py

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@
1717
# Adapted from vllm-project/vllm/blob/main/tests/conftest.py
1818
#
1919

20+
import contextlib
2021
import gc
2122
from typing import List, Optional, Tuple, TypeVar, Union
2223

@@ -53,11 +54,17 @@
5354
PromptVideoInput = _PromptMultiModalInput[np.ndarray]
5455

5556

56-
def cleanup_dist_env_and_memory():
57+
def cleanup_dist_env_and_memory(shutdown_ray: bool = False):
5758
destroy_model_parallel()
5859
destroy_distributed_environment()
60+
with contextlib.suppress(AssertionError):
61+
torch.distributed.destroy_process_group()
62+
if shutdown_ray:
63+
import ray # Lazy import Ray
64+
ray.shutdown()
5965
gc.collect()
6066
torch.npu.empty_cache()
67+
torch.npu.reset_peak_memory_stats()
6168

6269

6370
class VllmRunner:
Lines changed: 68 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,68 @@
1+
#
2+
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
3+
# Copyright 2023 The vLLM team.
4+
#
5+
# Licensed under the Apache License, Version 2.0 (the "License");
6+
# you may not use this file except in compliance with the License.
7+
# You may obtain a copy of the License at
8+
#
9+
# http://www.apache.org/licenses/LICENSE-2.0
10+
#
11+
# Unless required by applicable law or agreed to in writing, software
12+
# distributed under the License is distributed on an "AS IS" BASIS,
13+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14+
# See the License for the specific language governing permissions and
15+
# limitations under the License.
16+
# This file is a part of the vllm-ascend project.
17+
#
18+
"""Compare the short outputs of HF and vLLM when using greedy sampling.
19+
20+
Run `pytest tests/multicard/test_torchair_graph_mode.py`.
21+
"""
22+
import os
23+
24+
import pytest
25+
26+
from tests.conftest import VllmRunner
27+
28+
os.environ["PYTORCH_NPU_ALLOC_CONF"] = "max_split_size_mb:256"
29+
30+
31+
@pytest.mark.skipif(os.getenv("VLLM_USE_V1") == "0",
32+
reason="torchair graph is not supported on v0")
33+
def test_e2e_deepseekv3_with_torchair(monkeypatch: pytest.MonkeyPatch):
34+
with monkeypatch.context() as m:
35+
m.setenv("VLLM_USE_MODELSCOPE", "True")
36+
m.setenv("VLLM_WORKER_MULTIPROC_METHOD", "spawn")
37+
38+
example_prompts = [
39+
"Hello, my name is",
40+
"The president of the United States is",
41+
"The capital of France is",
42+
"The future of AI is",
43+
]
44+
dtype = "half"
45+
max_tokens = 5
46+
# torchair is only work without chunked-prefill now
47+
with VllmRunner(
48+
"vllm-ascend/DeepSeek-V3-Pruning",
49+
dtype=dtype,
50+
tensor_parallel_size=4,
51+
distributed_executor_backend="mp",
52+
additional_config={
53+
"torchair_graph_config": {
54+
"enabled": True,
55+
},
56+
"ascend_scheduler_config": {
57+
"enabled": True,
58+
},
59+
"refresh": True,
60+
},
61+
enforce_eager=False,
62+
) as vllm_model:
63+
vllm_output = vllm_model.generate_greedy(example_prompts,
64+
max_tokens)
65+
66+
for output in vllm_output:
67+
generated_text = output[1]
68+
print(f"Generated text: {generated_text!r}")

0 commit comments

Comments
 (0)