From 0eef8fb2cf06d78a01df2e44a0a9fe70f0e617f4 Mon Sep 17 00:00:00 2001
From: Gerson Beckenkamp
Date: Mon, 16 Dec 2024 23:59:06 +0000
Subject: [PATCH 1/3] Add inductor tests to test.py

---
 test.py | 41 ++++++++++++++++++++---------------------
 1 file changed, 20 insertions(+), 21 deletions(-)

diff --git a/test.py b/test.py
index ed8413839f..e1915da78c 100644
--- a/test.py
+++ b/test.py
@@ -54,7 +54,7 @@ def _create_example_model_instance(task: ModelTask, device: str):
     )
 
 
-def _load_test(path, device):
+def _load_test(path, device, mode):
     model_name = os.path.basename(path)
 
     def _skip_cuda_memory_check_p(metadata):
@@ -96,7 +96,7 @@ def train_fn(self):
         ):
             try:
                 task.make_model_instance(
-                    test="train", device=device, batch_size=batch_size
+                    test="train", device=device, batch_size=batch_size, extra_args=["--inductor"] if mode == "inductor" else []
                 )
                 task.invoke()
                 task.check_details_train(device=device, md=metadata)
@@ -119,7 +119,7 @@ def eval_fn(self):
         ):
             try:
                 task.make_model_instance(
-                    test="eval", device=device, batch_size=batch_size
+                    test="eval", device=device, batch_size=batch_size, extra_args=["--inductor"] if mode == "inductor" else []
                 )
                 task.invoke()
                 task.check_details_eval(device=device, md=metadata)
@@ -149,22 +149,24 @@ def check_device_fn(self):
         [example_fn, train_fn, eval_fn, check_device_fn],
         ["example", "train", "eval", "check_device"],
     ):
-        # set exclude list based on metadata
-        setattr(
-            TestBenchmark,
-            f"test_{model_name}_{fn_name}_{device}",
-            (
-                unittest.skipIf(
-                    skip_by_metadata(
-                        test=fn_name, device=device, extra_args=[], metadata=metadata
-                    ),
-                    "This test is skipped by its metadata",
-                )(fn)
-            ),
-        )
+        for mode in ["eager", "inductor"]:
+            # set exclude list based on metadata
+            setattr(
+                TestBenchmark,
+                f"test_{model_name}_{fn_name}_{device}" if fn_name in ["example", "check_device"] else f"test_{model_name}_{fn_name}_{device}_{mode}",
+                (
+                    unittest.skipIf(
+                        skip_by_metadata(
+                            test=fn_name, device=device, extra_args=[], metadata=metadata
+                        ),
+                        "This test is skipped by its metadata",
+                    )(fn)
+                ),
+            )
 
 
 def _load_tests():
+    modes = ["eager", "inductor"]
     devices = ["cpu"]
     if torch.cuda.is_available():
         devices.append("cuda")
@@ -176,12 +178,9 @@ def _load_tests():
     if os.getenv("USE_CANARY_MODELS"):
         model_paths.extend(_list_canary_model_paths())
     for path in model_paths:
-        # TODO: skipping quantized tests for now due to BC-breaking changes for prepare
-        # api, enable after PyTorch 1.13 release
-        if "quantized" in path:
-            continue
         for device in devices:
-            _load_test(path, device)
+            for mode in modes:
+                _load_test(path, device, mode)
 
 
 _load_tests()

From 0dd2b007803cc3ff662272ca749cf9d759005982 Mon Sep 17 00:00:00 2001
From: Gerson Beckenkamp
Date: Tue, 17 Dec 2024 00:12:55 +0000
Subject: [PATCH 2/3] Add the quantized test skip back

---
 test.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/test.py b/test.py
index e1915da78c..7f5e031d3e 100644
--- a/test.py
+++ b/test.py
@@ -178,6 +178,10 @@ def _load_tests():
     if os.getenv("USE_CANARY_MODELS"):
         model_paths.extend(_list_canary_model_paths())
     for path in model_paths:
+        # TODO: skipping quantized tests for now due to BC-breaking changes for prepare
+        # api, enable after PyTorch 1.13 release
+        if "quantized" in path:
+            continue
         for device in devices:
             for mode in modes:
                 _load_test(path, device, mode)

From 0f4f627b8d322fd8d9d53f55c92268f486a51900 Mon Sep 17 00:00:00 2001
From: Gerson Beckenkamp
Date: Tue, 17 Dec 2024 13:00:29 +0000
Subject: [PATCH 3/3] Allow all tests to be validated for both backends

---
 test.py | 41 ++++++++++++++++++++++-------------------
 1 file changed, 22 insertions(+), 19 deletions(-)

diff --git a/test.py b/test.py
index 7f5e031d3e..4fe81b5643 100644
--- a/test.py
+++ b/test.py
@@ -36,14 +36,17 @@ def tearDown(self):
         gc.collect()
 
 
-def _create_example_model_instance(task: ModelTask, device: str):
+def _create_example_model_instance(task: ModelTask, device: str, mode: str):
     skip = False
+    extra_args = ["--accuracy"]
+    if mode == "inductor":
+        extra_args.append("--inductor")
     try:
-        task.make_model_instance(test="eval", device=device, extra_args=["--accuracy"])
+        task.make_model_instance(test="eval", device=device, extra_args=extra_args)
     except NotImplementedError:
         try:
             task.make_model_instance(
-                test="train", device=device, extra_args=["--accuracy"]
+                test="train", device=device, extra_args=extra_args
             )
         except NotImplementedError:
             skip = True
@@ -70,7 +73,7 @@ def example_fn(self):
             skip=_skip_cuda_memory_check_p(metadata), assert_equal=self.assertEqual
         ):
             try:
-                _create_example_model_instance(task, device)
+                _create_example_model_instance(task, device, mode)
                 accuracy = task.get_model_attribute("accuracy")
                 assert (
                     accuracy == "pass"
@@ -136,7 +139,7 @@ def check_device_fn(self):
             skip=_skip_cuda_memory_check_p(metadata), assert_equal=self.assertEqual
         ):
             try:
-                task.make_model_instance(test="eval", device=device)
+                task.make_model_instance(test="eval", device=device, extra_args=["--inductor"] if mode == "inductor" else [])
                 task.check_device()
                 task.del_model_instance()
             except NotImplementedError as e:
@@ -149,20 +152,20 @@
         [example_fn, train_fn, eval_fn, check_device_fn],
         ["example", "train", "eval", "check_device"],
     ):
-        for mode in ["eager", "inductor"]:
-            # set exclude list based on metadata
-            setattr(
-                TestBenchmark,
-                f"test_{model_name}_{fn_name}_{device}" if fn_name in ["example", "check_device"] else f"test_{model_name}_{fn_name}_{device}_{mode}",
-                (
-                    unittest.skipIf(
-                        skip_by_metadata(
-                            test=fn_name, device=device, extra_args=[], metadata=metadata
-                        ),
-                        "This test is skipped by its metadata",
-                    )(fn)
-                ),
-            )
+        # set exclude list based on metadata
+        setattr(
+            TestBenchmark,
+            f"test_{model_name}_{fn_name}_{device}_{mode}",
+            (
+                unittest.skipIf(
+                    # This assumes models are never skipped based on backend alone, only on whether eval or train is implemented
+                    skip_by_metadata(
+                        test=fn_name, device=device, extra_args=[], metadata=metadata
+                    ),
+                    "This test is skipped by its metadata",
+                )(fn)
+            ),
+        )
 
 
 def _load_tests():
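As a rough illustration of the pattern the final patch converges on -- one generated unittest per (model, function, device, mode) tuple, with "--inductor" passed through extra_args -- here is a minimal, self-contained sketch. The names run_model, _make_test, and test_dummy_eval_cpu_* are hypothetical stand-ins for the real ModelTask machinery in test.py; only the registration mechanics mirror the series.

import unittest

import torch


class TestBenchmark(unittest.TestCase):
    pass


def run_model(mode: str) -> torch.Tensor:
    # Hypothetical stand-in for ModelTask.make_model_instance() + invoke().
    model = torch.nn.Linear(4, 4)
    if mode == "inductor":
        # Roughly what "--inductor" in extra_args requests: compile the
        # model with the inductor backend instead of running it eagerly.
        model = torch.compile(model, backend="inductor")
    return model(torch.randn(2, 4))


def _make_test(mode):
    # Bind `mode` through a factory so each generated test closes over
    # its own value rather than the loop variable's final value.
    def _fn(self):
        out = run_model(mode)
        self.assertEqual(out.shape, (2, 4))

    return _fn


# One test per mode, named like the final patch's
# f"test_{model_name}_{fn_name}_{device}_{mode}".
for mode in ["eager", "inductor"]:
    setattr(TestBenchmark, f"test_dummy_eval_cpu_{mode}", _make_test(mode))


if __name__ == "__main__":
    unittest.main()

Parametrizing the test name rather than branching inside a single test keeps the two backends independently selectable and skippable, which is what lets skip_by_metadata stay backend-agnostic, as the comment added in the final patch notes.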