
Commit d18fb08

pre-commit
1 parent 6c8572b commit d18fb08

File tree

1 file changed (+5 -8 lines)


tests/tests_pytorch/plugins/precision/test_amp.py

Lines changed: 5 additions & 8 deletions
@@ -14,15 +14,12 @@
 from unittest.mock import Mock
 
 import pytest
-from torch.optim import Optimizer
-
-from lightning.pytorch.plugins import MixedPrecision
-from lightning.pytorch.utilities import GradClipAlgorithmType
-
-from torch import nn
 import torch
+from torch import nn
+from torch.optim import Optimizer
 
 from lightning.pytorch.plugins.precision import MixedPrecision
+from lightning.pytorch.utilities import GradClipAlgorithmType
 
 
 def test_clip_gradients():
@@ -62,7 +59,7 @@ def test_optimizer_amp_scaling_support_in_step_method():
 def test_amp_with_no_grad(precision: str):
     layer = nn.Linear(2, 1)
     x = torch.randn(1, 2)
-    amp = MixedPrecision(precision=precision, device='cpu')
+    amp = MixedPrecision(precision=precision, device="cpu")
 
     with amp.autocast_context_manager():
         with torch.no_grad():
@@ -72,4 +69,4 @@ def test_amp_with_no_grad(precision: str):
 
     loss.backward()
 
-    assert loss.grad_fn is not None
+    assert loss.grad_fn is not None
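The hunks above show only fragments of test_amp_with_no_grad: the reordered imports, the setup lines, and the final assert. For orientation, here is a minimal sketch of what the whole test plausibly looks like after this commit. The parametrize values, the forward passes, and the loss construction are assumptions filled in around the lines that actually appear in the diff, and they are flagged as such in the comments.

import pytest
import torch
from torch import nn

from lightning.pytorch.plugins.precision import MixedPrecision


# Assumption: the parametrize decorator and its values are not visible in the diff;
# "16-mixed" on CPU also assumes a torch version with fp16 CPU autocast support.
@pytest.mark.parametrize("precision", ["16-mixed", "bf16-mixed"])
def test_amp_with_no_grad(precision: str):
    layer = nn.Linear(2, 1)
    x = torch.randn(1, 2)
    amp = MixedPrecision(precision=precision, device="cpu")

    with amp.autocast_context_manager():
        with torch.no_grad():
            # Forward pass under no_grad: this output carries no autograd history.
            layer(x)
        # Assumption: a second, grad-tracking forward pass and a scalar loss;
        # the diff only shows loss.backward() and the final assert.
        loss = layer(x).sum()

    loss.backward()

    # Leaving the no_grad block must not disable grad tracking for later ops.
    assert loss.grad_fn is not None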
