
Commit 1420ed6

[CI] Add e2e test for 310p
Signed-off-by: hfadzxy <starmoon_zhang@163.com>
1 parent 8cfd257 commit 1420ed6

File tree

5 files changed: +211 −0 lines changed


.github/actionlint.yaml

Lines changed: 3 additions & 0 deletions

@@ -5,4 +5,7 @@ self-hosted-runner:
     - linux-arm64-npu-2
     - linux-arm64-npu-4
     - linux-arm64-npu-static-8
+    - linux-aarch64-310p-1
+    - linux-aarch64-310p-2
+    - linux-aarch64-310p-4
     - ubuntu-24.04-arm

.github/workflows/vllm_ascend_test.yaml

Lines changed: 2 additions & 0 deletions

@@ -205,6 +205,7 @@ jobs:
             --ignore=tests/e2e/singlecard/test_embedding.py \
             --ignore=tests/e2e/singlecard/spec_decode_v1/test_v1_mtp_correctness.py \
             --ignore=tests/e2e/singlecard/spec_decode_v1/test_v1_spec_decode.py
+            --ignore=tests/e2e/singlecard/test_offline_inference_310p.py
           # ------------------------------------ v1 spec decode test ------------------------------------ #
           VLLM_USE_MODELSCOPE=True pytest -sv tests/e2e/singlecard/spec_decode_v1/test_v1_mtp_correctness.py
           # TODO: revert me when test_v1_spec_decode.py::test_ngram_correctness is fixed
@@ -284,3 +285,4 @@ jobs:
           pytest -sv tests/e2e/multicard/ --ignore=tests/e2e/multicard/test_ilama_lora_tp2.py \
             --ignore=tests/e2e/multicard/test_offline_inference_distributed.py \
             --ignore=tests/e2e/multicard/test_data_parallel.py
+            --ignore=tests/e2e/multicard/test_offline_inference_310p.py
Lines changed: 117 additions & 0 deletions

#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#

name: 'e2e test / 310p-test'

on:
  push:
    tags:
      - 'v*'
  schedule:
    # Runs every 6 hours
    - cron: '0 */6 * * *'
  pull_request:
    types: [ labeled ]

# Bash shells do not read ~/.profile or ~/.bashrc, so steps that need the
# ascend-toolkit environment variables must explicitly declare
# "shell: bash -el {0}" to activate them.
defaults:
  run:
    shell: bash -el {0}

# only cancel in-progress runs of the same workflow
# and ignore the lint / 1 card / 4 cards test type
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  e2e:
    # Triggered when a PR carries both the 'e2e-310p-test' and
    # 'ready-for-test' labels, or by a schedule or tag-push event.
    if: >-
      ${{
      (contains(github.event.pull_request.labels.*.name, 'e2e-310p-test')) &&
      contains(github.event.pull_request.labels.*.name, 'ready-for-test') ||
      github.event_name == 'schedule' || github.event_name == 'push'
      }}
    strategy:
      max-parallel: 2
      matrix:
        os: [linux-aarch64-310p-1, linux-aarch64-310p-4]
        vllm_version: [main, v0.9.2]
    name: 310p e2e test
    runs-on: ${{ matrix.os }}
    container:
      # TODO(yikun): Remove m.daocloud.io prefix when infra proxy ready
      image: m.daocloud.io/quay.io/ascend/cann:8.1.rc1-310p-ubuntu22.04-py3.10
      env:
        VLLM_LOGGING_LEVEL: ERROR
        VLLM_USE_MODELSCOPE: True
    steps:
      - name: Check npu and CANN info
        run: |
          npu-smi info
          cat /usr/local/Ascend/ascend-toolkit/latest/"$(uname -i)"-linux/ascend_toolkit_install.info

      - name: Config mirrors
        run: |
          sed -i 's|ports.ubuntu.com|mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list
          pip config set global.index-url https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
          apt-get update -y
          apt install git -y
          git config --global url."https://gh-proxy.test.osinfra.cn/https://github.com/".insteadOf https://github.com/

      - name: Checkout vllm-project/vllm-ascend repo
        uses: actions/checkout@v4

      - name: Install system dependencies
        run: |
          apt-get -y install `cat packages.txt`
          apt-get -y install git vim wget net-tools gcc g++ cmake libnuma-dev curl gnupg2

      - name: Checkout vllm-project/vllm repo
        uses: actions/checkout@v4
        with:
          repository: vllm-project/vllm
          ref: ${{ matrix.vllm_version }}
          path: ./vllm-empty

      - name: Install vllm-project/vllm from source
        working-directory: ./vllm-empty
        run: |
          VLLM_TARGET_DEVICE=empty pip install -e .

      - name: Install vllm-project/vllm-ascend
        run: |
          export PIP_EXTRA_INDEX_URL=https://mirrors.huaweicloud.com/ascend/repos/pypi
          export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/x86_64-linux/devlib
          export SOC_VERSION=ASCEND310P3
          pip install -r requirements-dev.txt
          pip install -v -e .

      - name: Run e2e test
        env:
          VLLM_WORKER_MULTIPROC_METHOD: spawn
          VLLM_USE_MODELSCOPE: True
          PYTORCH_NPU_ALLOC_CONF: max_split_size_mb:256
        run: |
          if [[ "${{ matrix.os }}" == "linux-aarch64-310p-1" ]]; then
            pytest -sv tests/e2e/singlecard/test_offline_inference_310p.py
          else
            pytest -sv tests/e2e/multicard/test_offline_inference_310p.py
          fi
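
For local debugging outside CI, the branch in the final step can be reproduced with a small driver script. The following is a minimal sketch and not part of this commit: the script name and helper are made up, and it assumes npu-smi info -l prints one "NPU ID" line per visible device before dispatching to the same pytest targets the workflow runs.

# local_310p_e2e.py - hypothetical helper, not part of this commit.
# Mirrors the workflow's "Run e2e test" step: a single-card machine gets
# the singlecard suite, anything else gets the multicard suite.
import os
import subprocess
import sys


def visible_npu_count() -> int:
    """Count NPUs via npu-smi; assumes one 'NPU ID' line per device."""
    try:
        out = subprocess.run(["npu-smi", "info", "-l"],
                             capture_output=True, text=True,
                             check=True).stdout
        return sum(1 for line in out.splitlines() if "NPU ID" in line)
    except (OSError, subprocess.CalledProcessError):
        return 1  # fall back to the single-card path


def main() -> int:
    # Same env the workflow exports for the test step.
    os.environ.setdefault("VLLM_WORKER_MULTIPROC_METHOD", "spawn")
    os.environ.setdefault("VLLM_USE_MODELSCOPE", "True")
    os.environ.setdefault("PYTORCH_NPU_ALLOC_CONF", "max_split_size_mb:256")

    if visible_npu_count() <= 1:
        target = "tests/e2e/singlecard/test_offline_inference_310p.py"
    else:
        target = "tests/e2e/multicard/test_offline_inference_310p.py"
    return subprocess.call([sys.executable, "-m", "pytest", "-sv", target])


if __name__ == "__main__":
    sys.exit(main())

Run it from the vllm-ascend repo root after installing vllm and vllm-ascend, e.g. python local_310p_e2e.py.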
Lines changed: 45 additions & 0 deletions

#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
import pytest
import vllm  # noqa: F401

import vllm_ascend  # noqa: F401
from tests.e2e.conftest import VllmRunner

# Pangu local model path
MODELS = [
    "IntervitensInc/pangu-pro-moe-model",
]


@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("dtype", ["float16"])
@pytest.mark.parametrize("max_tokens", [5])
def test_pangu_model(model: str, dtype: str, max_tokens: int) -> None:
    example_prompts = [
        "Hello, my name is",
        "The future of AI is",
    ]

    with VllmRunner(model,
                    tensor_parallel_size=4,
                    dtype=dtype,
                    max_model_len=1024,
                    enforce_eager=True,
                    enable_expert_parallel=True,
                    distributed_executor_backend="mp") as vllm_model:
        vllm_model.generate_greedy(example_prompts, max_tokens)
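
VllmRunner is this repo's test fixture around vLLM's offline API. For orientation, roughly the same multicard configuration can be expressed against vllm.LLM directly; this is a hedged sketch rather than the fixture's actual implementation, and it assumes the installed vLLM build accepts enable_expert_parallel and distributed_executor_backend as engine arguments, as recent versions do.

# Hypothetical standalone equivalent of the test above; not part of the commit.
from vllm import LLM, SamplingParams

llm = LLM(model="IntervitensInc/pangu-pro-moe-model",
          tensor_parallel_size=4,       # 4-card runner, one shard per NPU
          dtype="float16",
          max_model_len=1024,
          enforce_eager=True,           # skip graph capture on 310P
          enable_expert_parallel=True,  # MoE experts spread across ranks
          distributed_executor_backend="mp")

# temperature=0 approximates generate_greedy: argmax token at every step.
params = SamplingParams(temperature=0, max_tokens=5)
for out in llm.generate(["Hello, my name is", "The future of AI is"], params):
    print(out.outputs[0].text)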
Lines changed: 44 additions & 0 deletions

#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
import pytest
import vllm  # noqa: F401

import vllm_ascend  # noqa: F401
from tests.e2e.conftest import VllmRunner

MODELS = ["Qwen/Qwen3-0.6B-Base", "Qwen/Qwen2.5-7B-Instruct"]


@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("dtype", ["float16"])
@pytest.mark.parametrize("max_tokens", [5])
def test_models(model: str, dtype: str, max_tokens: int) -> None:
    example_prompts = [
        "Hello, my name is",
        "The future of AI is",
    ]

    with VllmRunner(model,
                    tensor_parallel_size=1,
                    dtype=dtype,
                    max_model_len=2048,
                    enforce_eager=True,
                    compilation_config={
                        "custom_ops":
                        ["none", "+rms_norm", "+rotary_embedding"]
                    }) as vllm_model:
        vllm_model.generate_greedy(example_prompts, max_tokens)
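
The compilation_config above selects which custom kernels stay enabled: "none" turns all custom ops off, and the "+" prefix opts individual ops (here rms_norm and rotary_embedding) back in. As a hedged sketch of the same selection through vLLM's public offline API, assuming the installed build accepts a compilation_config dict as recent vLLM versions do:

# Hypothetical sketch, not part of the commit: the test's custom-op
# selection passed straight to vllm.LLM.
from vllm import LLM, SamplingParams

llm = LLM(model="Qwen/Qwen3-0.6B-Base",
          dtype="float16",
          max_model_len=2048,
          enforce_eager=True,
          compilation_config={
              # "none" disables every custom op; "+name" re-enables one.
              "custom_ops": ["none", "+rms_norm", "+rotary_embedding"],
          })

outputs = llm.generate(["Hello, my name is"],
                       SamplingParams(temperature=0, max_tokens=5))
print(outputs[0].outputs[0].text)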
