We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 6dccb59 commit b805b77 — Copy full SHA for b805b77
tests/test_quantization/lifecycle/test_enabled.py
@@ -26,8 +26,8 @@
26
27
28
def test_quantization_enabled_disabled():
29
- inp = torch.randn(16, dtype=torch.float16)
30
- model = Linear(16, 16, dtype=torch.float16)
+ inp = torch.randn(16)
+ model = Linear(16, 16)
31
quantized_model = deepcopy(model)
32
apply_quantization_config(
33
model=quantized_model,
0 commit comments