
Commit a240a5e (1 parent: 0665500)

[CI]Add e2e test for 310p

Signed-off-by: hfadzxy <starmoon_zhang@163.com>

File tree: 4 files changed, +296 / -0 lines

.github/actionlint.yaml
Lines changed: 3 additions & 0 deletions

@@ -5,4 +5,7 @@ self-hosted-runner:
   - linux-arm64-npu-2
   - linux-arm64-npu-4
   - linux-arm64-npu-static-8
+  - linux-aarch64-310p-1
+  - linux-aarch64-310p-2
+  - linux-aarch64-310p-4
   - ubuntu-24.04-arm

(These entries register the new 310p self-hosted runner labels with actionlint, so the runs-on values used by the workflow below are not flagged as unknown runners.)
New GitHub Actions workflow — Lines changed: 187 additions & 0 deletions

@@ -0,0 +1,187 @@
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#

name: 'e2e test / 310p-test'

on:
  push:
    tags:
      - 'v*'
  schedule:
    # Runs every 6 hours
    - cron: '0 */6 * * *'
  pull_request:
    types: [ labeled ]

# Bash shells do not use ~/.profile or ~/.bashrc so these shells need to be explicitly
# declared as "shell: bash -el {0}" on steps that need to be properly activated.
# It's used to activate ascend-toolkit environment variables.
defaults:
  run:
    shell: bash -el {0}

# only cancel in-progress runs of the same workflow
# and ignore the lint / 1 card / 4 cards test type
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  e2e:
    # Triggered when the PR carries both the 'e2e-310p-test' and 'ready-for-test'
    # labels, or by the schedule / push (tag) events
    if: >-
      ${{
      (contains(github.event.pull_request.labels.*.name, 'e2e-310p-test')) &&
      contains(github.event.pull_request.labels.*.name, 'ready-for-test') ||
      github.event_name == 'schedule' || github.event_name == 'push'
      }}
    strategy:
      max-parallel: 2
      matrix:
        os: [linux-aarch64-310p-1]
        vllm_version: [main, v0.9.2]
    name: singlecard 310p e2e test
    runs-on: ${{ matrix.os }}
    container:
      # TODO(yikun): Remove m.daocloud.io prefix when infra proxy ready
      image: m.daocloud.io/quay.io/ascend/cann:8.1.rc1-310p-ubuntu22.04-py3.10
      env:
        VLLM_LOGGING_LEVEL: ERROR
        VLLM_USE_MODELSCOPE: True
    steps:
      - name: Check npu and CANN info
        run: |
          npu-smi info
          cat /usr/local/Ascend/ascend-toolkit/latest/"$(uname -i)"-linux/ascend_toolkit_install.info

      - name: Config mirrors
        run: |
          sed -i 's|ports.ubuntu.com|mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list
          pip config set global.index-url https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
          apt-get update -y
          apt install git -y
          git config --global url."https://gh-proxy.test.osinfra.cn/https://github.com/".insteadOf https://github.com/

      - name: Checkout vllm-project/vllm repo
        uses: actions/checkout@v4
        with:
          repository: vllm-project/vllm
          ref: ${{ matrix.vllm_version }}
          path: ./vllm-empty

      - name: Checkout vllm-project/vllm-ascend repo
        uses: actions/checkout@v4

      - name: Install system dependencies
        run: |
          apt-get -y install `cat packages.txt`
          apt-get -y install git vim wget net-tools gcc g++ cmake libnuma-dev curl gnupg2

      - name: Install vllm-project/vllm from source
        working-directory: ./vllm-empty
        run: |
          VLLM_TARGET_DEVICE=empty pip install -e .

      - name: Install vllm-project/vllm-ascend
        working-directory: ./vllm-ascend
        run: |
          export PIP_EXTRA_INDEX_URL=https://mirrors.huaweicloud.com/ascend/repos/pypi
          export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/x86_64-linux/devlib
          export SOC_VERSION=ASCEND310P3
          pip install -r requirements-dev.txt
          pip install -v -e .

      - name: Run e2e test
        env:
          VLLM_WORKER_MULTIPROC_METHOD: spawn
          VLLM_USE_MODELSCOPE: True
          PYTORCH_NPU_ALLOC_CONF: max_split_size_mb:256
        run: |
          pytest -sv tests/e2e/310p/test_offline_inference_singlecard.py

  e2e-4-cards:
    # Triggered when the PR carries both the 'e2e-310p-test' and 'ready-for-test'
    # labels, or by the schedule / push (tag) events
    if: >-
      ${{
      (contains(github.event.pull_request.labels.*.name, 'e2e-310p-test')) &&
      contains(github.event.pull_request.labels.*.name, 'ready-for-test') ||
      github.event_name == 'schedule' || github.event_name == 'push'
      }}
    strategy:
      max-parallel: 2
      matrix:
        os: [linux-aarch64-310p-4]
        vllm_version: [main, v0.9.2]
    name: multicard 310p e2e test
    runs-on: ${{ matrix.os }}
    container:
      # TODO(yikun): Remove m.daocloud.io prefix when infra proxy ready
      image: m.daocloud.io/quay.io/ascend/cann:8.1.rc1-310p-ubuntu22.04-py3.10
      env:
        VLLM_LOGGING_LEVEL: ERROR
        VLLM_USE_MODELSCOPE: True
    steps:
      - name: Check npu and CANN info
        run: |
          npu-smi info
          cat /usr/local/Ascend/ascend-toolkit/latest/"$(uname -i)"-linux/ascend_toolkit_install.info

      - name: Config mirrors
        run: |
          sed -i 's|ports.ubuntu.com|mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list
          pip config set global.index-url https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
          apt-get update -y
          apt install git -y
          git config --global url."https://gh-proxy.test.osinfra.cn/https://github.com/".insteadOf https://github.com/

      - name: Checkout vllm-project/vllm repo
        uses: actions/checkout@v4
        with:
          repository: vllm-project/vllm
          ref: ${{ matrix.vllm_version }}
          path: ./vllm-empty

      - name: Checkout vllm-project/vllm-ascend repo
        uses: actions/checkout@v4
        with:
          repository: vllm-project/vllm-ascend

      - name: Install system dependencies
        run: |
          apt-get -y install `cat packages.txt`
          apt-get -y install git vim wget net-tools gcc g++ cmake libnuma-dev curl gnupg2

      - name: Install vllm-project/vllm from source
        working-directory: ./vllm-empty
        run: |
          VLLM_TARGET_DEVICE=empty pip install -e .

      - name: Install vllm-project/vllm-ascend
        run: |
          export PIP_EXTRA_INDEX_URL=https://mirrors.huaweicloud.com/ascend/repos/pypi
          export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/x86_64-linux/devlib
          export SOC_VERSION=ASCEND310P3
          pip install -r requirements-dev.txt
          pip install -v -e .

      - name: Run e2e test
        env:
          VLLM_WORKER_MULTIPROC_METHOD: spawn
          VLLM_USE_MODELSCOPE: True
          PYTORCH_NPU_ALLOC_CONF: max_split_size_mb:256
        run: |
          pytest -sv tests/e2e/310p/test_offline_inference_multicard.py
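For local debugging outside CI, the same suites can be driven from a short Python entry point. This is a minimal sketch, not part of the commit: it assumes a 310p host where CANN, vllm, and vllm-ascend are already installed as in the workflow's install steps, and it simply re-exports the environment variables the workflow sets before invoking pytest.

import os
import sys

import pytest

# Same runtime environment the workflow exports for the e2e jobs (values copied from above).
os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn"
os.environ["VLLM_USE_MODELSCOPE"] = "True"
os.environ["PYTORCH_NPU_ALLOC_CONF"] = "max_split_size_mb:256"

# Run the single-card suite; on a 4-card host, point this at
# tests/e2e/310p/test_offline_inference_multicard.py instead.
sys.exit(pytest.main(["-sv", "tests/e2e/310p/test_offline_inference_singlecard.py"]))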
tests/e2e/310p/test_offline_inference_multicard.py
Lines changed: 51 additions & 0 deletions

@@ -0,0 +1,51 @@
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
# Adapted from vllm/tests/basic_correctness/test_basic_correctness.py
#
"""Compare the short outputs of the Pangu (Ascend) model when using greedy sampling.

Run `pytest tests/e2e/310p/test_offline_inference_multicard.py`.
"""
import pytest
import vllm  # noqa: F401

import vllm_ascend  # noqa: F401
from tests.e2e.conftest import VllmRunner

# Pangu local model path
MODELS = [
    "IntervitensInc/pangu-pro-moe-model",
]


@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("dtype", ["float16"])
@pytest.mark.parametrize("max_tokens", [5])
def test_pangu_model(model: str, dtype: str, max_tokens: int) -> None:
    example_prompts = [
        "Hello, my name is",
        "The future of AI is",
    ]

    with VllmRunner(model,
                    tensor_parallel_size=4,
                    dtype=dtype,
                    max_model_len=1024,
                    enforce_eager=True,
                    enable_expert_parallel=True,
                    distributed_executor_backend="mp") as vllm_model:
        vllm_model.generate_greedy(example_prompts, max_tokens)
tests/e2e/310p/test_offline_inference_singlecard.py
Lines changed: 55 additions & 0 deletions

@@ -0,0 +1,55 @@
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
# Adapted from vllm/tests/basic_correctness/test_basic_correctness.py
#
"""Compare the short outputs of HF and vLLM when using greedy sampling.

Run `pytest tests/e2e/310p/test_offline_inference_singlecard.py`.
"""
import os

import pytest
import vllm  # noqa: F401

import vllm_ascend  # noqa: F401
from tests.e2e.conftest import VllmRunner

MODELS = ["Qwen/Qwen3-0.6B-Base", "Qwen/Qwen2.5-7B-Instruct"]

os.environ["PYTORCH_NPU_ALLOC_CONF"] = "max_split_size_mb:256"
os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn"


@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("dtype", ["float16"])
@pytest.mark.parametrize("max_tokens", [5])
def test_models(model: str, dtype: str, max_tokens: int) -> None:
    example_prompts = [
        "Hello, my name is",
        "The future of AI is",
    ]

    with VllmRunner(model,
                    tensor_parallel_size=1,
                    dtype=dtype,
                    max_model_len=2048,
                    enforce_eager=True,
                    compilation_config={
                        "custom_ops":
                        ["none", "+rms_norm", "+rotary_embedding"]
                    }) as vllm_model:
        vllm_model.generate_greedy(example_prompts, max_tokens)
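VllmRunner and generate_greedy come from the project's test fixture (tests/e2e/conftest). As a rough standalone equivalent of the single-card case, the sketch below uses vLLM's public API directly; greedy decoding is expressed as temperature=0, and the compilation_config plumbing from the test is omitted. This is an illustration, not part of the commit.

from vllm import LLM, SamplingParams

# Sketch: single-card offline inference roughly mirroring the test above.
llm = LLM(model="Qwen/Qwen3-0.6B-Base",
          tensor_parallel_size=1,
          dtype="float16",
          max_model_len=2048,
          enforce_eager=True)

# generate_greedy(...) in the fixture amounts to temperature=0 decoding for a few tokens.
params = SamplingParams(temperature=0, max_tokens=5)
for output in llm.generate(["Hello, my name is", "The future of AI is"], params):
    print(output.outputs[0].text)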
