
Commit ad92dee

fix
Signed-off-by: David9857 <985700846@qq.com>
1 parent 9195c05 commit ad92dee

File tree

1 file changed (+7, -7 lines)


vllm_ascend/models/qwen3.py

Lines changed: 7 additions & 7 deletions
@@ -2,24 +2,24 @@
 from typing import Optional, Union
 
 import torch
-from torch import nn
-import torch.nn.functional as F
 import torch.distributed as dist
+import torch.nn.functional as F
+from torch import nn
 from transformers import Qwen3Config
+from vllm.attention import AttentionType
 from vllm.compilation.decorators import support_torch_compile
 from vllm.config import CacheConfig, VllmConfig
-from vllm.attention import AttentionType
-from vllm.distributed import (get_pp_group,
+from vllm.distributed import (get_pp_group, get_tensor_model_parallel_rank,
                               get_tensor_model_parallel_world_size,
-                              get_tensor_model_parallel_rank,
                               tensor_model_parallel_all_gather)
+from vllm.model_executor.layers.linear import (ReplicatedLinear,
+                                                RowParallelLinear)
 from vllm.model_executor.layers.logits_processor import LogitsProcessor
 from vllm.model_executor.layers.quantization import QuantizationConfig
 from vllm.model_executor.layers.vocab_parallel_embedding import ParallelLMHead
-from vllm.model_executor.layers.linear import RowParallelLinear, ReplicatedLinear
 from vllm.model_executor.models.interfaces import SupportsLoRA, SupportsPP
-from vllm.model_executor.models.qwen2 import Qwen2Model
 from vllm.model_executor.models.qwen2 import Qwen2MLP as Qwen3MLP
+from vllm.model_executor.models.qwen2 import Qwen2Model
 from vllm.model_executor.models.qwen3 import Qwen3Attention
 from vllm.model_executor.models.utils import (AutoWeightsLoader,
                                               PPMissingLayer, maybe_prefix)
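The diff only reorders the import block (standard library first, then third-party, then vllm modules, each group alphabetized, with plain import statements ahead of from-imports); no symbols are added or removed. Below is a minimal sketch of how such an ordering can be checked locally with isort's Python API; whether vllm-ascend actually enforces isort in its lint setup is an assumption here, not something stated in this commit.

# Hedged sketch: verify import ordering with isort (assumes `pip install isort`).
# The file path comes from this commit; the isort configuration used is the
# library default, which may differ from the project's real lint settings.
import isort

path = "vllm_ascend/models/qwen3.py"

# check_file() returns True when the file's imports are already sorted;
# show_diff=True prints the reordering isort would apply otherwise.
if isort.check_file(path, show_diff=True):
    print("imports already sorted")
else:
    print("imports need reordering")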
