Commit 777046f

[Fix] Update input embeddings condition to include prompt embeddings for multimodal models

Signed-off-by: jesse <szxfml@gmail.com>
1 parent: 3f29bec

File tree

1 file changed: +1 -1 lines changed

vllm_ascend/worker/model_runner_v1.py

Lines changed: 1 addition & 1 deletion
@@ -297,7 +297,7 @@ def __init__(self, vllm_config: VllmConfig, device: torch.device):
         self.is_multimodal_model = self.model_config.is_multimodal_model
         self.is_pooling_model = self.model_config.pooler_config is not None
         self.enable_prompt_embeds = self.model_config.enable_prompt_embeds
-        if self.is_multimodal_model:
+        if self.is_multimodal_model or self.enable_prompt_embeds:
             self.inputs_embeds = self._make_buffer(self.max_num_tokens,
                                                    self.model_config.get_hidden_size(),
                                                    dtype=self.dtype,
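
To illustrate why the condition needed the extra clause, here is a minimal Python sketch of the corrected initialization path. Everything below (the ModelRunnerSketch class, the make_buffer helper, and the SimpleNamespace config) is an illustrative stand-in, not vLLM's or vllm-ascend's actual API; only the changed condition mirrors the diff above. The idea of the fix: the inputs_embeds buffer must be allocated not only for multimodal models but also whenever prompt embeddings are enabled, since in that mode the runner receives embeddings rather than token IDs and needs a buffer to stage them in.

from types import SimpleNamespace

import torch


def make_buffer(num_tokens: int, hidden_size: int,
                dtype: torch.dtype) -> torch.Tensor:
    # Illustrative stand-in for the runner's _make_buffer helper:
    # just a preallocated (num_tokens, hidden_size) tensor.
    return torch.zeros(num_tokens, hidden_size, dtype=dtype)


class ModelRunnerSketch:
    """Toy runner showing only the condition changed by this commit."""

    def __init__(self, model_config, max_num_tokens: int, dtype: torch.dtype):
        self.is_multimodal_model = model_config.is_multimodal_model
        self.is_pooling_model = model_config.pooler_config is not None
        self.enable_prompt_embeds = model_config.enable_prompt_embeds
        self.inputs_embeds = None
        # Before the fix, the buffer existed only for multimodal models, so a
        # text-only model with prompt embeddings enabled had no inputs_embeds
        # buffer to copy the user-supplied embeddings into.
        if self.is_multimodal_model or self.enable_prompt_embeds:
            self.inputs_embeds = make_buffer(max_num_tokens,
                                             model_config.get_hidden_size(),
                                             dtype=dtype)


# A text-only model with prompt embeddings enabled now gets the buffer.
config = SimpleNamespace(is_multimodal_model=False,
                         pooler_config=None,
                         enable_prompt_embeds=True,
                         get_hidden_size=lambda: 4096)
runner = ModelRunnerSketch(config, max_num_tokens=1024, dtype=torch.float16)
assert runner.inputs_embeds is not None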
