3
3
from warnings import warn
4
4
5
5
import pytest
6
- from torch import Generator , Tensor , allclose , device as Device , equal , isclose , randn , tensor
6
+ import torch
7
+ from torch import Tensor , device as Device
7
8
8
9
from refiners .fluxion import manual_seed
9
10
from refiners .foundationals .latent_diffusion .solvers import (
@@ -27,7 +28,7 @@ def test_ddpm_diffusers():
27
28
diffusers_scheduler = DDPMScheduler (beta_schedule = "scaled_linear" , beta_start = 0.00085 , beta_end = 0.012 )
28
29
diffusers_scheduler .set_timesteps (1000 )
29
30
solver = DDPM (num_inference_steps = 1000 )
30
- assert equal (diffusers_scheduler .timesteps , solver .timesteps )
31
+ assert torch . equal (diffusers_scheduler .timesteps , solver .timesteps )
31
32
32
33
33
34
@pytest .mark .parametrize (
@@ -58,10 +59,10 @@ def test_dpm_solver_diffusers(n_steps: int, last_step_first_order: bool, sde_var
58
59
sigma_schedule = NoiseSchedule .KARRAS if use_karras_sigmas else None ,
59
60
),
60
61
)
61
- assert equal (solver .timesteps , diffusers_scheduler .timesteps )
62
+ assert torch . equal (solver .timesteps , diffusers_scheduler .timesteps )
62
63
63
- sample = randn (1 , 3 , 32 , 32 )
64
- predicted_noise = randn (1 , 3 , 32 , 32 )
64
+ sample = torch . randn (1 , 3 , 32 , 32 )
65
+ predicted_noise = torch . randn (1 , 3 , 32 , 32 )
65
66
66
67
manual_seed (37 )
67
68
diffusers_outputs : list [Tensor ] = [
@@ -74,7 +75,7 @@ def test_dpm_solver_diffusers(n_steps: int, last_step_first_order: bool, sde_var
74
75
75
76
atol = 1e-4 if use_karras_sigmas else 1e-6
76
77
for step , (diffusers_output , refiners_output ) in enumerate (zip (diffusers_outputs , refiners_outputs )):
77
- assert allclose (diffusers_output , refiners_output , rtol = 0.01 , atol = atol ), f"outputs differ at step { step } "
78
+ assert torch . allclose (diffusers_output , refiners_output , rtol = 0.01 , atol = atol ), f"outputs differ at step { step } "
78
79
79
80
80
81
def test_ddim_diffusers ():
@@ -92,16 +93,16 @@ def test_ddim_diffusers():
92
93
)
93
94
diffusers_scheduler .set_timesteps (30 )
94
95
solver = DDIM (num_inference_steps = 30 )
95
- assert equal (solver .timesteps , diffusers_scheduler .timesteps )
96
+ assert torch . equal (solver .timesteps , diffusers_scheduler .timesteps )
96
97
97
- sample = randn (1 , 4 , 32 , 32 )
98
- predicted_noise = randn (1 , 4 , 32 , 32 )
98
+ sample = torch . randn (1 , 4 , 32 , 32 )
99
+ predicted_noise = torch . randn (1 , 4 , 32 , 32 )
99
100
100
101
for step , timestep in enumerate (diffusers_scheduler .timesteps ):
101
102
diffusers_output = cast (Tensor , diffusers_scheduler .step (predicted_noise , timestep , sample ).prev_sample ) # type: ignore
102
103
refiners_output = solver (x = sample , predicted_noise = predicted_noise , step = step )
103
104
104
- assert allclose (diffusers_output , refiners_output , rtol = 0.01 ), f"outputs differ at step { step } "
105
+ assert torch . allclose (diffusers_output , refiners_output , rtol = 0.01 ), f"outputs differ at step { step } "
105
106
106
107
107
108
@pytest .mark .parametrize ("model_prediction_type" , [ModelPredictionType .NOISE , ModelPredictionType .SAMPLE ])
@@ -122,20 +123,20 @@ def test_euler_diffusers(model_prediction_type: ModelPredictionType):
122
123
)
123
124
diffusers_scheduler .set_timesteps (30 )
124
125
solver = Euler (num_inference_steps = 30 , params = SolverParams (model_prediction_type = model_prediction_type ))
125
- assert equal (solver .timesteps , diffusers_scheduler .timesteps )
126
+ assert torch . equal (solver .timesteps , diffusers_scheduler .timesteps )
126
127
127
- sample = randn (1 , 4 , 32 , 32 )
128
- predicted_noise = randn (1 , 4 , 32 , 32 )
128
+ sample = torch . randn (1 , 4 , 32 , 32 )
129
+ predicted_noise = torch . randn (1 , 4 , 32 , 32 )
129
130
130
131
ref_init_noise_sigma = diffusers_scheduler .init_noise_sigma # type: ignore
131
132
assert isinstance (ref_init_noise_sigma , Tensor )
132
- assert isclose (ref_init_noise_sigma , solver .init_noise_sigma ), "init_noise_sigma differ"
133
+ assert torch . isclose (ref_init_noise_sigma , solver .init_noise_sigma ), "init_noise_sigma differ"
133
134
134
135
for step , timestep in enumerate (diffusers_scheduler .timesteps ):
135
136
diffusers_output = cast (Tensor , diffusers_scheduler .step (predicted_noise , timestep , sample ).prev_sample ) # type: ignore
136
137
refiners_output = solver (x = sample , predicted_noise = predicted_noise , step = step )
137
138
138
- assert allclose (diffusers_output , refiners_output , rtol = 0.02 ), f"outputs differ at step { step } "
139
+ assert torch . allclose (diffusers_output , refiners_output , rtol = 0.02 ), f"outputs differ at step { step } "
139
140
140
141
141
142
def test_franken_diffusers ():
@@ -157,21 +158,21 @@ def test_franken_diffusers():
157
158
158
159
diffusers_scheduler_2 = EulerDiscreteScheduler (** params ) # type: ignore
159
160
solver = FrankenSolver (lambda : diffusers_scheduler_2 , num_inference_steps = 30 )
160
- assert equal (solver .timesteps , diffusers_scheduler .timesteps )
161
+ assert torch . equal (solver .timesteps , diffusers_scheduler .timesteps )
161
162
162
- sample = randn (1 , 4 , 32 , 32 )
163
- predicted_noise = randn (1 , 4 , 32 , 32 )
163
+ sample = torch . randn (1 , 4 , 32 , 32 )
164
+ predicted_noise = torch . randn (1 , 4 , 32 , 32 )
164
165
165
166
ref_init_noise_sigma = diffusers_scheduler .init_noise_sigma # type: ignore
166
167
assert isinstance (ref_init_noise_sigma , Tensor )
167
- init_noise_sigma = solver .scale_model_input (tensor (1 ), step = - 1 )
168
- assert equal (ref_init_noise_sigma , init_noise_sigma ), "init_noise_sigma differ"
168
+ init_noise_sigma = solver .scale_model_input (torch . tensor (1 ), step = - 1 )
169
+ assert torch . equal (ref_init_noise_sigma , init_noise_sigma ), "init_noise_sigma differ"
169
170
170
171
for step , timestep in enumerate (diffusers_scheduler .timesteps ):
171
172
diffusers_output = cast (Tensor , diffusers_scheduler .step (predicted_noise , timestep , sample ).prev_sample ) # type: ignore
172
173
refiners_output = solver (x = sample , predicted_noise = predicted_noise , step = step )
173
174
174
- assert equal (diffusers_output , refiners_output ), f"outputs differ at step { step } "
175
+ assert torch . equal (diffusers_output , refiners_output ), f"outputs differ at step { step } "
175
176
176
177
177
178
def test_lcm_diffusers ():
@@ -180,16 +181,16 @@ def test_lcm_diffusers():
180
181
manual_seed (0 )
181
182
182
183
# LCMScheduler is stochastic, make sure we use identical generators
183
- diffusers_generator = Generator ().manual_seed (42 )
184
- refiners_generator = Generator ().manual_seed (42 )
184
+ diffusers_generator = torch . Generator ().manual_seed (42 )
185
+ refiners_generator = torch . Generator ().manual_seed (42 )
185
186
186
187
diffusers_scheduler = LCMScheduler ()
187
188
diffusers_scheduler .set_timesteps (4 )
188
189
solver = LCMSolver (num_inference_steps = 4 )
189
- assert equal (solver .timesteps , diffusers_scheduler .timesteps )
190
+ assert torch . equal (solver .timesteps , diffusers_scheduler .timesteps )
190
191
191
- sample = randn (1 , 4 , 32 , 32 )
192
- predicted_noise = randn (1 , 4 , 32 , 32 )
192
+ sample = torch . randn (1 , 4 , 32 , 32 )
193
+ predicted_noise = torch . randn (1 , 4 , 32 , 32 )
193
194
194
195
for step , timestep in enumerate (diffusers_scheduler .timesteps ):
195
196
alpha_prod_t = diffusers_scheduler .alphas_cumprod [timestep ]
@@ -212,7 +213,7 @@ def test_lcm_diffusers():
212
213
generator = refiners_generator ,
213
214
)
214
215
215
- assert allclose (refiners_output , diffusers_output , rtol = 0.01 ), f"outputs differ at step { step } "
216
+ assert torch . allclose (refiners_output , diffusers_output , rtol = 0.01 ), f"outputs differ at step { step } "
216
217
217
218
218
219
def test_solver_remove_noise ():
@@ -231,14 +232,14 @@ def test_solver_remove_noise():
231
232
diffusers_scheduler .set_timesteps (30 )
232
233
solver = DDIM (num_inference_steps = 30 )
233
234
234
- sample = randn (1 , 4 , 32 , 32 )
235
- noise = randn (1 , 4 , 32 , 32 )
235
+ sample = torch . randn (1 , 4 , 32 , 32 )
236
+ noise = torch . randn (1 , 4 , 32 , 32 )
236
237
237
238
for step , timestep in enumerate (diffusers_scheduler .timesteps ):
238
239
diffusers_output = cast (Tensor , diffusers_scheduler .step (noise , timestep , sample ).pred_original_sample ) # type: ignore
239
240
refiners_output = solver .remove_noise (x = sample , noise = noise , step = step )
240
241
241
- assert allclose (diffusers_output , refiners_output , rtol = 0.01 ), f"outputs differ at step { step } "
242
+ assert torch . allclose (diffusers_output , refiners_output , rtol = 0.01 ), f"outputs differ at step { step } "
242
243
243
244
244
245
def test_solver_device (test_device : Device ):
@@ -247,16 +248,16 @@ def test_solver_device(test_device: Device):
247
248
pytest .skip ()
248
249
249
250
scheduler = DDIM (num_inference_steps = 30 , device = test_device )
250
- x = randn (1 , 4 , 32 , 32 , device = test_device )
251
- noise = randn (1 , 4 , 32 , 32 , device = test_device )
251
+ x = torch . randn (1 , 4 , 32 , 32 , device = test_device )
252
+ noise = torch . randn (1 , 4 , 32 , 32 , device = test_device )
252
253
noised = scheduler .add_noise (x , noise , scheduler .first_inference_step )
253
254
assert noised .device == test_device
254
255
255
256
256
257
def test_solver_add_noise (test_device : Device ):
257
258
scheduler = DDIM (num_inference_steps = 30 , device = test_device )
258
- latent = randn (1 , 4 , 32 , 32 , device = test_device )
259
- noise = randn (1 , 4 , 32 , 32 , device = test_device )
259
+ latent = torch . randn (1 , 4 , 32 , 32 , device = test_device )
260
+ noise = torch . randn (1 , 4 , 32 , 32 , device = test_device )
260
261
noised = scheduler .add_noise (
261
262
x = latent ,
262
263
noise = noise ,
@@ -267,8 +268,8 @@ def test_solver_add_noise(test_device: Device):
267
268
noise = noise .repeat (2 , 1 , 1 , 1 ),
268
269
step = [0 , 0 ],
269
270
)
270
- assert allclose (noised , noised_double [0 ])
271
- assert allclose (noised , noised_double [1 ])
271
+ assert torch . allclose (noised , noised_double [0 ])
272
+ assert torch . allclose (noised , noised_double [1 ])
272
273
273
274
274
275
@pytest .mark .parametrize ("noise_schedule" , [NoiseSchedule .UNIFORM , NoiseSchedule .QUADRATIC , NoiseSchedule .KARRAS ])
@@ -291,20 +292,27 @@ def test_solver_timestep_spacing():
291
292
num_train_timesteps = 1000 ,
292
293
offset = 1 ,
293
294
)
294
- assert equal (linspace_int , tensor ([1000 , 889 , 778 , 667 , 556 , 445 , 334 , 223 , 112 , 1 ]))
295
+ assert torch . equal (linspace_int , torch . tensor ([1000 , 889 , 778 , 667 , 556 , 445 , 334 , 223 , 112 , 1 ]))
295
296
296
297
leading = Solver .generate_timesteps (
297
298
spacing = TimestepSpacing .LEADING ,
298
299
num_inference_steps = 10 ,
299
300
num_train_timesteps = 1000 ,
300
301
offset = 1 ,
301
302
)
302
- assert equal (leading , tensor ([901 , 801 , 701 , 601 , 501 , 401 , 301 , 201 , 101 , 1 ]))
303
+ assert torch . equal (leading , torch . tensor ([901 , 801 , 701 , 601 , 501 , 401 , 301 , 201 , 101 , 1 ]))
303
304
304
305
trailing = Solver .generate_timesteps (
305
306
spacing = TimestepSpacing .TRAILING ,
306
307
num_inference_steps = 10 ,
307
308
num_train_timesteps = 1000 ,
308
309
offset = 1 ,
309
310
)
310
- assert equal (trailing , tensor ([1000 , 900 , 800 , 700 , 600 , 500 , 400 , 300 , 200 , 100 ]))
311
+ assert torch .equal (trailing , torch .tensor ([1000 , 900 , 800 , 700 , 600 , 500 , 400 , 300 , 200 , 100 ]))
312
+
313
+
314
def test_dpm_bfloat16(test_device: Device):
    """Smoke test: constructing a DPMSolver with dtype=bfloat16 must not raise.

    Only the constructor is exercised — it builds the solver's internal
    schedule tensors in bfloat16, which is the code path under test.
    """
    # Guard: this check is skipped when the test device is the CPU.
    on_cpu = test_device.type == "cpu"
    if on_cpu:
        warn("not running on CPU, skipping")
        pytest.skip()
    # Success == no exception during construction.
    DPMSolver(num_inference_steps=5, dtype=torch.bfloat16)
0 commit comments