@@ -1,36 +1,86 @@
-import collections
 import dataclasses
 
+import pytest
 import torch
 from lightning.fabric.utilities.optimizer import _optimizer_to_device
 from torch import Tensor
 
+from tests_fabric.helpers.runif import RunIf
 
-def test_optimizer_to_device():
-    @dataclasses.dataclass(frozen=True)
+
+@pytest.mark.parametrize(
+    "optimizer_class",
+    [
+        torch.optim.Adam,
+        torch.optim.AdamW,
+        torch.optim.SGD,
+        torch.optim.RMSprop,
+        torch.optim.Adagrad,
+        torch.optim.Adadelta,
+        torch.optim.Adamax,
+    ],
+)
+@pytest.mark.parametrize(
+    "src_device",
+    [
+        torch.device("cpu"),
+        pytest.param(torch.device("cuda"), marks=RunIf(min_cuda_gpus=1)),
+    ],
+)
+@pytest.mark.parametrize(
+    "dst_device",
+    [
+        torch.device("cpu"),
+        pytest.param(torch.device("cuda"), marks=RunIf(min_cuda_gpus=1)),
+    ],
+)
+def test_optimizer_to_device(optimizer_class, src_device, dst_device):
+    # Optimizer with no state initialized
+    model = torch.nn.Linear(2, 2, device=src_device)
+    optimizer = optimizer_class(model.parameters(), lr=0.1)
+    _optimizer_to_device(optimizer, dst_device)
+    _assert_opt_parameters_on_device(optimizer, dst_device)
+
+    # Optimizer with state initialized
+    model = torch.nn.Linear(2, 2, device=src_device)
+    optimizer = optimizer_class(model.parameters(), lr=0.1)
+    model(torch.randn(2, 2, device=src_device)).sum().backward()
+    optimizer.step()
+    _optimizer_to_device(optimizer, dst_device)
+    _assert_opt_parameters_on_device(optimizer, dst_device)
+
+
+def _assert_opt_parameters_on_device(opt, device):
+    for _, v in opt.state.items():
+        for key, item in v.items():
+            if not isinstance(item, Tensor):
+                continue
+            if key == "step":
+                # The "step" tensor needs to remain on CPU
+                assert item.device.type == "cpu"
+            else:
+                assert item.device.type == device.type
+
+
+@RunIf(min_cuda_gpus=1)
+@pytest.mark.parametrize("frozen", [True, False])
+def test_optimizer_to_device_with_dataclass_in_state(frozen):
+    src_device = torch.device("cpu")
+    dst_device = torch.device("cuda")
+    model = torch.nn.Linear(32, 2, device=src_device)
+
+    @dataclasses.dataclass(frozen=frozen)
     class FooState:
-        bar: int
+        integer: int
+        tensor: Tensor
 
     class TestOptimizer(torch.optim.SGD):
         def __init__(self, *args, **kwargs):
             super().__init__(*args, **kwargs)
-            self.state["dummy"] = torch.tensor(0)
-            self.state["frozen"] = FooState(0)
-
-    layer = torch.nn.Linear(32, 2)
-    opt = TestOptimizer(layer.parameters(), lr=0.1)
-    _optimizer_to_device(opt, "cpu")
-    if torch.cuda.is_available():
-        _optimizer_to_device(opt, "cuda")
-        assert_opt_parameters_on_device(opt, "cuda")
-
-
-def assert_opt_parameters_on_device(opt, device: str):
-    for param in opt.state.values():
-        # Not sure there are any global tensors in the state dict
-        if isinstance(param, Tensor):
-            assert param.data.device.type == device
-        elif isinstance(param, collections.abc.Mapping):
-            for subparam in param.values():
-                if isinstance(subparam, Tensor):
-                    assert param.data.device.type == device
+            self.state[model.weight] = {"dummy": torch.tensor(0)}
+            self.state[model.bias] = FooState(0, torch.tensor(0))
+
+    optimizer = TestOptimizer(model.parameters(), lr=0.1)
+    _optimizer_to_device(optimizer, dst_device)
+    assert optimizer.state[model.weight]["dummy"].device.type == dst_device.type
+    assert optimizer.state[model.bias].tensor.device.type == ("cpu" if frozen else dst_device.type)