Commit e3aa96d

zzhx1 committed
[CI] fix
Signed-off-by: zzhx1 <zzh_201018@outlook.com>
1 parent 209aa6f commit e3aa96d

File tree

1 file changed: +5 -1 lines changed

tests/ut/ops/test_vocab_parallel_embedding.py

Lines changed: 5 additions & 1 deletion
@@ -207,6 +207,10 @@ def setUp(self):
             patch(
                 "vllm_ascend.ops.vocab_parallel_embedding.get_lmhead_tp_group.all_to_all",
                 return_value=torch.randn(1, self.vocab_size))
+            patch("vllm_ascend.ops.vocab_parallel_embedding.get_lmhead_tp_group.all_gather",
+                  return_value=torch.randn(1, self.vocab_size)),
+            patch("vllm_ascend.core.schedule_config.AscendSchedulerConfig.initialize_from_config",
+                  return_value=MagicMock(max_num_batched_tokens=1000, max_model_len=512, enable_chunked_prefill=False))
         ]
 
         for p in self.patches:
@@ -229,4 +233,4 @@ def test_get_logits(self):
         lmhead.quant_method.apply = self.mock_quant_method.apply
         hidden_state = torch.randn(1, self.org_num_embeddings)
         processor._get_logits(hidden_state, lmhead)
-        self.mock_quant_method.apply.assert_called_once()
+        self.mock_quant_method.apply.assert_called_once()
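For context, a common way to manage a patch list like the one this commit extends is to start each unittest.mock patcher in setUp and register its stop via addCleanup so the patches are undone even when a test fails. The sketch below only illustrates that pattern: the test class, the torch.distributed targets, and the mock_apply helper are placeholders for illustration, not the actual vllm_ascend test, whose real patch targets are the paths shown in the diff above.

import unittest
from unittest.mock import MagicMock, patch

import torch


class TestPatchListPattern(unittest.TestCase):
    """Illustrative sketch only: placeholder patch targets, not the real vllm_ascend test."""

    def setUp(self):
        self.vocab_size = 32
        # Un-started patchers collected in a list, mirroring self.patches in the diff.
        # patch() forwards extra kwargs such as return_value to the replacement MagicMock.
        self.patches = [
            patch("torch.distributed.get_world_size", return_value=1),
            patch("torch.distributed.get_rank", return_value=0),
        ]
        for p in self.patches:
            p.start()
            # Ensure the patch is removed even if a test raises.
            self.addCleanup(p.stop)

    def test_mock_apply_called_once(self):
        # A MagicMock stands in for the quantization method's apply(); the assertion
        # mirrors self.mock_quant_method.apply.assert_called_once() in the diff.
        mock_apply = MagicMock(return_value=torch.randn(1, self.vocab_size))
        mock_apply(torch.randn(1, self.vocab_size))
        mock_apply.assert_called_once()


if __name__ == "__main__":
    unittest.main()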
