@@ -231,13 +231,13 @@
 import diffusers
 from accelerate.logging import get_logger
 from accelerate import Accelerator
-from accelerate.utils import set_seed
+from accelerate.utils import set_seed, ProjectConfiguration
 from diffusers import AutoencoderKL, DDPMScheduler, DiffusionPipeline, UNet2DConditionModel, DPMSolverMultistepScheduler
 from diffusers.optimization import get_scheduler
 from huggingface_hub import HfFolder, Repository, create_repo, whoami
 from tqdm.auto import tqdm
 from transformers import AutoTokenizer, PretrainedConfig
-from diffusers.models.cross_attention import CrossAttention
+from diffusers.models.attention import Attention
 from diffusers.utils.import_utils import is_xformers_available
 from diffusers.utils import check_min_version, is_wandb_available

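Note on the two import changes above: ProjectConfiguration is the accelerate helper used further down to group the output and logging directories, and Attention is the newer name of the class that older diffusers releases exposed as CrossAttention (in current releases the class is also importable from diffusers.models.attention_processor). A minimal compatibility sketch, assuming the script may be run against either an older or a newer diffusers install:

    # Sketch only, not part of the patch: import the attention class under either name.
    try:
        from diffusers.models.attention import Attention                          # newer diffusers
    except ImportError:
        from diffusers.models.cross_attention import CrossAttention as Attention  # older diffusers
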
@@ -273,7 +273,7 @@ def create_custom_diffusion(unet, freeze_model):
     # change attn class
     def change_attn(unet):
         for layer in unet.children():
-            if type(layer) == CrossAttention:
+            if type(layer) == Attention:
                 bound_method = set_use_memory_efficient_attention_xformers.__get__(layer, layer.__class__)
                 setattr(layer, 'set_use_memory_efficient_attention_xformers', bound_method)
             else:
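The change_attn helper in this hunk patches set_use_memory_efficient_attention_xformers onto each attention layer by binding a plain function to the instance through the descriptor protocol (function.__get__). A small self-contained sketch of that technique, with made-up names:

    class Layer:
        def greet(self):
            return "original class method"

    def patched_greet(self):
        # behaves like a normal method once bound to an instance
        return "patched method on " + type(self).__name__

    layer = Layer()
    # function.__get__(instance, cls) returns a method bound to that instance
    bound_method = patched_greet.__get__(layer, layer.__class__)
    setattr(layer, "greet", bound_method)
    print(layer.greet())  # -> patched method on Layer

The else branch is cut off by the hunk; it presumably recurses into child modules so nested attention layers are reached as well.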
@@ -593,6 +593,7 @@ def get_full_repo_name(model_id: str, organization: Optional[str] = None, token:

 def main(args):
     logging_dir = Path(args.output_dir, args.logging_dir)
+    accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)

     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
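The new accelerator_project_config is presumably handed to Accelerator through its project_config keyword, as in other diffusers training scripts; the hunk ends before that line, so the wiring below is only a sketch, with placeholder paths standing in for args.output_dir and args.logging_dir:

    from accelerate import Accelerator
    from accelerate.utils import ProjectConfiguration

    # placeholder paths; the real script uses args.output_dir / args.logging_dir
    accelerator_project_config = ProjectConfiguration(
        project_dir="custom-diffusion-model",
        logging_dir="custom-diffusion-model/logs",
    )
    accelerator = Accelerator(
        gradient_accumulation_steps=1,
        mixed_precision="fp16",
        project_config=accelerator_project_config,  # assumption: keyword used by the script
    )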