
Commit 82d641a

yao-matrix authored and zucchini-nlp committed
enable internvl UTs on XPU (huggingface#37779)
* enable internvl UTs on XPU
  Signed-off-by: YAO Matrix <matrix.yao@intel.com>
* fix style
  Signed-off-by: YAO Matrix <matrix.yao@intel.com>
* fix style per comments
  Signed-off-by: Yao Matrix <matrix.yao@intel.com>
---------
Signed-off-by: YAO Matrix <matrix.yao@intel.com>
Signed-off-by: Yao Matrix <matrix.yao@intel.com>
1 parent b7fe854 commit 82d641a
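
Most of the diff below replaces single hard-coded expected values with per-device expectations, so the same test can assert different reference outputs on CUDA and XPU. Here is a minimal sketch of the pattern as it is used in the tests; the key format (device_type, major_version) and the behavior of get_expectation(), which resolves to the entry matching the accelerator the test is running on, are inferred from that usage rather than stated anywhere in this commit.

import torch
from transformers.testing_utils import Expectations

# Reference values keyed by (device_type, major version); the numbers are taken
# from the forward test in the diff below.
expected_logits_all = Expectations(
    {
        ("xpu", 3): torch.tensor([11.7500, 14.7500, 14.1250, 10.5625, 6.7812], dtype=torch.bfloat16),
        ("cuda", 7): torch.tensor([11.9375, 14.8750, 14.0625, 10.7500, 6.9062], dtype=torch.bfloat16),
    }
)
# Picks the entry for the device this test process is actually running on.
expected_logits = expected_logits_all.get_expectation()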

File tree

2 files changed (+95, -20 lines)


tests/models/internvl/test_modeling_internvl.py

Lines changed: 92 additions & 15 deletions
@@ -27,11 +27,13 @@
     is_vision_available,
 )
 from transformers.testing_utils import (
+    Expectations,
     cleanup,
     require_av,
     require_bitsandbytes,
+    require_deterministic_for_xpu,
     require_torch,
-    require_torch_gpu,
+    require_torch_accelerator,
     slow,
     torch_device,
 )
@@ -177,7 +179,7 @@ def create_and_check_model_fp16_autocast_forward(self, config, input_ids, pixel_
         model = InternVLForConditionalGeneration(config=config)
         model.to(torch_device)
         model.eval()
-        with torch.autocast(device_type="cuda", dtype=torch.float16):
+        with torch.autocast(device_type=torch_device, dtype=torch.float16):
             logits = model(
                 input_ids=input_ids,
                 attention_mask=attention_mask,
@@ -279,7 +281,7 @@ def test_flash_attn_2_inference_equivalence_right_padding(self):
 
 
 @slow
-@require_torch_gpu
+@require_torch_accelerator
 class InternVLQwen2IntegrationTest(unittest.TestCase):
     def setUp(self):
         self.small_model_checkpoint = "OpenGVLab/InternVL3-1B-hf"
@@ -326,14 +328,22 @@ def test_qwen2_small_model_integration_forward(self):
         output = model(**inputs)
 
         actual_logits = output.logits[0, -1, :5].cpu()
-        expected_logits = torch.tensor([11.9375, 14.8750, 14.0625, 10.7500, 6.9062], dtype=torch.bfloat16)
+        expected_logits_all = Expectations(
+            {
+                ("xpu", 3): torch.tensor([11.7500, 14.7500, 14.1250, 10.5625, 6.7812], dtype=torch.bfloat16),
+                ("cuda", 7): torch.tensor([11.9375, 14.8750, 14.0625, 10.7500, 6.9062], dtype=torch.bfloat16),
+            }
+        )  # fmt: skip
+        expected_logits = expected_logits_all.get_expectation()
+
         self.assertTrue(
             torch.allclose(actual_logits, expected_logits, atol=0.1),
             f"Actual logits: {actual_logits}"
             f"\nExpected logits: {expected_logits}"
             f"\nDifference: {torch.abs(actual_logits - expected_logits)}",
         )
 
+    @require_deterministic_for_xpu
     def test_qwen2_small_model_integration_generate_text_only(self):
         processor = AutoProcessor.from_pretrained(self.small_model_checkpoint)
         model = InternVLForConditionalGeneration.from_pretrained(
@@ -346,7 +356,15 @@ def test_qwen2_small_model_integration_generate_text_only(self):
         decoded_output = processor.decode(
             generate_ids[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True
         )
-        expected_output = "Whispers of dawn,\nSilent whispers of the night,\nNew day's light begins."
+
+        expected_outputs = Expectations(
+            {
+                ("xpu", 3): "Whispers of dawn,\nSilent whispers of the night,\nNew day's light.",
+                ("cuda", 7): "Whispers of dawn,\nSilent whispers of the night,\nNew day's light begins.",
+            }
+        )  # fmt: skip
+        expected_output = expected_outputs.get_expectation()
+
         self.assertEqual(decoded_output, expected_output)
 
     def test_qwen2_small_model_integration_generate_chat_template(self):
@@ -375,6 +393,7 @@ def test_qwen2_small_model_integration_generate_chat_template(self):
         expected_output = "The image shows two cats lying on a pink blanket. The cat on the left is a tabby"
         self.assertEqual(decoded_output, expected_output)
 
+    @require_deterministic_for_xpu
     def test_qwen2_small_model_integration_batched_generate(self):
         processor = AutoProcessor.from_pretrained(self.small_model_checkpoint)
         model = InternVLForConditionalGeneration.from_pretrained(
@@ -404,7 +423,15 @@ def test_qwen2_small_model_integration_batched_generate(self):
         )
         # Check second output
         decoded_output = processor.decode(output[1], skip_special_tokens=True)
-        expected_output = 'user\n\nDescribe this image\nassistant\nThe image shows a street scene with a traditional Chinese archway, known as a "Chinese Gate" or "Chinese Gate of'  # fmt: skip
+
+        expected_outputs = Expectations(
+            {
+                ("xpu", 3): 'user\n\nDescribe this image\nassistant\nThe image shows a street scene with a traditional Chinese archway, known as a "Chinese Gate" or "Chinese Gate"',
+                ("cuda", 7): 'user\n\nDescribe this image\nassistant\nThe image shows a street scene with a traditional Chinese archway, known as a "Chinese Gate" or "Chinese Gate of',
+            }
+        )  # fmt: skip
+        expected_output = expected_outputs.get_expectation()
+
         self.assertEqual(
             decoded_output,
             expected_output,
@@ -455,7 +482,14 @@ def test_qwen2_small_model_integration_batched_generate_multi_image(self):
 
         # Check second output
         decoded_output = processor.decode(output[1], skip_special_tokens=True)
-        expected_output = 'user\n\nWhat are the differences between these two images?\nassistant\nThe images show the Statue of Liberty and the Golden Gate Bridge from different angles. Here are the differences:\n\n1. **Angle'  # fmt: skip
+        expected_outputs = Expectations(
+            {
+                ("xpu", 3): "user\n\nWhat are the differences between these two images?\nassistant\nThe images show the Statue of Liberty and the Golden Gate Bridge from different angles. Here are the differences:\n\n1. **Foreground",
+                ("cuda", 7): "user\n\nWhat are the differences between these two images?\nassistant\nThe images show the Statue of Liberty and the Golden Gate Bridge from different angles. Here are the differences:\n\n1. **Angle",
+            }
+        )  # fmt: skip
+        expected_output = expected_outputs.get_expectation()
+
         self.assertEqual(
             decoded_output,
             expected_output,
@@ -495,14 +529,21 @@ def test_qwen2_medium_model_integration_video(self):
         output = model.generate(**inputs, do_sample=False, max_new_tokens=25)
 
         decoded_output = processor.decode(output[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True)
-        expected_output = 'The man is performing a forehand shot.'  # fmt: skip
+        expected_outputs = Expectations(
+            {
+                ("xpu", 3): "The man is performing a volley.",
+                ("cuda", 7): "The man is performing a forehand shot.",
+            }
+        )  # fmt: skip
+        expected_output = expected_outputs.get_expectation()
         self.assertEqual(
             decoded_output,
             expected_output,
             f"Decoded output: {decoded_output}\nExpected output: {expected_output}",
         )
 
     @require_av
+    @require_deterministic_for_xpu
     def test_qwen2_small_model_integration_interleaved_images_videos(self):
         processor = AutoProcessor.from_pretrained(self.small_model_checkpoint)
         model = InternVLForConditionalGeneration.from_pretrained(
@@ -564,15 +605,27 @@ def test_qwen2_small_model_integration_interleaved_images_videos(self):
 
         decoded_output = processor.decode(output[0], skip_special_tokens=True)
         # Batching seems to alter the output slightly, but it is also the case in the original implementation. This seems to be expected: https://github.com/huggingface/transformers/issues/23017#issuecomment-1649630232
-        expected_output = 'user\n\n\nWhat are the differences between these two images?\nassistant\nThe images depict two distinct scenes:\n\n1. **Left Image**: This shows the Statue of Liberty on Liberty Island, with the'  # fmt: skip
+        expected_outputs = Expectations(
+            {
+                ("xpu", 3): "user\n\n\nWhat are the differences between these two images?\nassistant\nThe images depict two distinct scenes:\n\n1. **Left Image:**\n - The Statue of Liberty is prominently featured on an",
+                ("cuda", 7): "user\n\n\nWhat are the differences between these two images?\nassistant\nThe images depict two distinct scenes:\n\n1. **Left Image**: This shows the Statue of Liberty on Liberty Island, with the",
+            }
+        )  # fmt: skip
+        expected_output = expected_outputs.get_expectation()
         self.assertEqual(
             decoded_output,
             expected_output,
             f"Decoded output: {decoded_output}\nExpected output: {expected_output}",
         )
         # Check second output
         decoded_output = processor.decode(output[1], skip_special_tokens=True)
-        expected_output = 'user\nFrame1: \nFrame2: \nFrame3: \nFrame4: \nFrame5: \nFrame6: \nFrame7: \nFrame8: \nWhat type of shot is the man performing?\nassistant\nA forehand shot'  # fmt: skip
+        expected_outputs = Expectations(
+            {
+                ("xpu", 3): "user\nFrame1: \nFrame2: \nFrame3: \nFrame4: \nFrame5: \nFrame6: \nFrame7: \nFrame8: \nWhat type of shot is the man performing?\nassistant\nThe man is performing a forehand shot.",
+                ("cuda", 7): "user\nFrame1: \nFrame2: \nFrame3: \nFrame4: \nFrame5: \nFrame6: \nFrame7: \nFrame8: \nWhat type of shot is the man performing?\nassistant\nA forehand shot",
+            }
+        )  # fmt: skip
+        expected_output = expected_outputs.get_expectation()
         self.assertEqual(
             decoded_output,
             expected_output,
@@ -590,7 +643,7 @@ def test_qwen2_small_model_integration_interleaved_images_videos(self):
 
 
 @slow
-@require_torch_gpu
+@require_torch_accelerator
 class InternVLLlamaIntegrationTest(unittest.TestCase):
     def setUp(self):
         self.small_model_checkpoint = "OpenGVLab/InternVL2_5-2B-MPO-hf"
@@ -711,7 +764,13 @@ def test_llama_small_model_integration_batched_generate(self):
 
         # Check first output
         decoded_output = processor.decode(output[0], skip_special_tokens=True)
-        expected_output = 'user\n\nWrite a haiku for this image\nassistant\nMajestic snow-capped peaks,\nWooden dock stretches to the sea,\nSilent water mirrors.'  # fmt: skip
+        expected_outputs = Expectations(
+            {
+                ("xpu", 3): "user\n\nWrite a haiku for this image\nassistant\nMajestic snow-capped peaks,\nWooden path leads to calm lake,\nNature's peaceful grace.",
+                ("cuda", 7): "user\n\nWrite a haiku for this image\nassistant\nMajestic snow-capped peaks,\nWooden dock stretches to the sea,\nSilent water mirrors.",
+            }
+        )  # fmt: skip
+        expected_output = expected_outputs.get_expectation()
         self.assertEqual(
             decoded_output,
             expected_output,
@@ -880,7 +939,13 @@ def test_llama_small_model_integration_interleaved_images_videos(self):
 
         decoded_output = processor.decode(output[0], skip_special_tokens=True)
         # Batching seems to alter the output slightly, but it is also the case in the original implementation. This seems to be expected: https://github.com/huggingface/transformers/issues/23017#issuecomment-1649630232
-        expected_output = 'user\n\n\nWhat are the difference between these two images?\nassistant\nI apologize for the confusion in my previous response. Upon closer inspection, the differences between the two images are:\n\n1. **'  # fmt: skip
+        expected_outputs = Expectations(
+            {
+                ("xpu", 3): "user\n\n\nWhat are the difference between these two images?\nassistant\nI apologize for the confusion in my previous response. After re-examining the images, I can see that they are actually",
+                ("cuda", 7): "user\n\n\nWhat are the difference between these two images?\nassistant\nI apologize for the confusion in my previous response. Upon closer inspection, the differences between the two images are:\n\n1. **",
+            }
+        )  # fmt: skip
+        expected_output = expected_outputs.get_expectation()
         self.assertEqual(
             decoded_output,
             expected_output,
@@ -889,7 +954,13 @@ def test_llama_small_model_integration_interleaved_images_videos(self):
 
         # Check second output
         decoded_output = processor.decode(output[1], skip_special_tokens=True)
-        expected_output = 'user\nFrame1: \nFrame2: \nFrame3: \nFrame4: \nFrame5: \nFrame6: \nFrame7: \nFrame8: \nWhat type of shot is the man performing?\nassistant\nThe man is performing a forehand shot. This is a common shot in tennis where the player swings the racket across their'  # fmt: skip
+        expected_outputs = Expectations(
+            {
+                ("xpu", 3): "user\nFrame1: \nFrame2: \nFrame3: \nFrame4: \nFrame5: \nFrame6: \nFrame7: \nFrame8: \nWhat type of shot is the man performing?\nassistant\nThe man is performing a forehand shot. This is a common shot in tennis where the player swings the racket across their",
+                ("cuda", 7): "user\nFrame1: \nFrame2: \nFrame3: \nFrame4: \nFrame5: \nFrame6: \nFrame7: \nFrame8: \nWhat type of shot is the man performing?\nassistant\nThe man is performing a forehand shot. This is a common shot in tennis where the player swings the racket across their",
+            }
+        )  # fmt: skip
+        expected_output = expected_outputs.get_expectation()
         self.assertEqual(
             decoded_output,
             expected_output,
@@ -898,7 +969,13 @@ def test_llama_small_model_integration_interleaved_images_videos(self):
 
         # Check third output
         decoded_output = processor.decode(output[2], skip_special_tokens=True)
-        expected_output = 'user\n\nWrite a haiku for this image\nassistant\nMajestic snow-capped peaks,\nA wooden path leads to the sea,\nPeaceful, untouched dreams.'  # fmt: skip
+        expected_outputs = Expectations(
+            {
+                ("xpu", 3): "user\n\nWrite a haiku for this image\nassistant\nMajestic snow-capped peaks,\nWooden dock stretches to the sea,\nSilent water mirrors.",
+                ("cuda", 7): "user\n\nWrite a haiku for this image\nassistant\nMajestic snow-capped peaks,\nA wooden path leads to the sea,\nPeaceful, untouched dreams.",
+            }
+        )  # fmt: skip
+        expected_output = expected_outputs.get_expectation()
         self.assertEqual(
             decoded_output,
             expected_output,
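
Alongside the per-device expectations above, the diff swaps CUDA-only gates for device-agnostic ones: @require_torch_gpu becomes @require_torch_accelerator, several generation tests gain @require_deterministic_for_xpu, and autocast is keyed on torch_device instead of a hard-coded "cuda". A minimal sketch of the resulting test skeleton follows; it assumes require_torch_accelerator skips unless a supported accelerator is present and that require_deterministic_for_xpu additionally gates the test behind deterministic execution on XPU, with both decorators imported from transformers.testing_utils exactly as in the diff above.

import unittest

import torch
from transformers.testing_utils import (
    require_deterministic_for_xpu,
    require_torch_accelerator,
    slow,
    torch_device,
)


@slow
@require_torch_accelerator  # runs on any supported accelerator (CUDA, XPU, ...) rather than CUDA only
class DeviceAgnosticIntegrationTest(unittest.TestCase):
    @require_deterministic_for_xpu  # assumption: skips on XPU unless deterministic execution is enforced
    def test_forward_fp16(self):
        # torch_device resolves to "cuda", "xpu", etc.; no literal "cuda" in the autocast call
        with torch.autocast(device_type=torch_device, dtype=torch.float16):
            ...  # hypothetical forward pass; the real tests build InternVL models here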

tests/test_modeling_common.py

Lines changed: 3 additions & 5 deletions
@@ -80,7 +80,6 @@
     require_bitsandbytes,
     require_deepspeed,
     require_flash_attn,
-    require_non_xpu,
     require_safetensors,
     require_torch,
     require_torch_accelerator,
@@ -2605,7 +2604,7 @@ def test_inputs_embeds_matches_input_ids(self):
             )[0]
         torch.testing.assert_close(out_embeds, out_ids)
 
-    @require_non_xpu
+    @require_torch_gpu
     @require_torch_multi_gpu
     def test_multi_gpu_data_parallel_forward(self):
         config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
@@ -3875,7 +3874,6 @@ def test_sdpa_can_dispatch_on_flash(self):
         with sdpa_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
             _ = model(**inputs_dict)
 
-    @require_non_xpu
     @require_torch_sdpa
     @require_torch_accelerator
     @slow
@@ -3888,8 +3886,8 @@ def test_sdpa_can_compile_dynamic(self):
             self.skipTest(reason="This test requires an NVIDIA GPU with compute capability >= 8.0")
         elif device_type == "rocm" and major < 9:
             self.skipTest(reason="This test requires an AMD GPU with compute capability >= 9.0")
-        else:
-            self.skipTest(reason="This test requires a Nvidia or AMD GPU")
+        elif device_type not in ["cuda", "rocm", "xpu"]:
+            self.skipTest(reason="This test requires a Nvidia or AMD GPU, or an Intel XPU")
 
         torch.compiler.reset()
 