Skip to content

Commit e8e2482

Browse files
authored
Modular backend - Seamless (#6651)
## Summary Seamless code from #6577. ## Related Issues / Discussions #6606 https://invokeai.notion.site/Modular-Stable-Diffusion-Backend-Design-Document-e8952daab5d5472faecdc4a72d377b0d ## QA Instructions Run with and without the `USE_MODULAR_DENOISE` environment variable set. ## Merge Plan None. If you think there should be some kind of tests, feel free to add them. ## Checklist - [x] _The PR has a short but descriptive title, suitable for a changelog_ - [ ] _Tests added / updated (if applicable)_ - [ ] _Documentation added / updated (if applicable)_
2 parents daa5a88 + c57a7af commit e8e2482

File tree

5 files changed

+80
-57
lines changed

5 files changed

+80
-57
lines changed

invokeai/app/invocations/denoise_latents.py

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@
3939
from invokeai.backend.lora import LoRAModelRaw
4040
from invokeai.backend.model_manager import BaseModelType
4141
from invokeai.backend.model_patcher import ModelPatcher
42-
from invokeai.backend.stable_diffusion import PipelineIntermediateState, set_seamless
42+
from invokeai.backend.stable_diffusion import PipelineIntermediateState
4343
from invokeai.backend.stable_diffusion.denoise_context import DenoiseContext, DenoiseInputs
4444
from invokeai.backend.stable_diffusion.diffusers_pipeline import (
4545
ControlNetData,
@@ -62,6 +62,7 @@
6262
from invokeai.backend.stable_diffusion.extensions.freeu import FreeUExt
6363
from invokeai.backend.stable_diffusion.extensions.preview import PreviewExt
6464
from invokeai.backend.stable_diffusion.extensions.rescale_cfg import RescaleCFGExt
65+
from invokeai.backend.stable_diffusion.extensions.seamless import SeamlessExt
6566
from invokeai.backend.stable_diffusion.extensions_manager import ExtensionsManager
6667
from invokeai.backend.stable_diffusion.schedulers import SCHEDULER_MAP
6768
from invokeai.backend.stable_diffusion.schedulers.schedulers import SCHEDULER_NAME_VALUES
@@ -833,6 +834,10 @@ def step_callback(state: PipelineIntermediateState) -> None:
833834
if self.unet.freeu_config:
834835
ext_manager.add_extension(FreeUExt(self.unet.freeu_config))
835836

837+
### seamless
838+
if self.unet.seamless_axes:
839+
ext_manager.add_extension(SeamlessExt(self.unet.seamless_axes))
840+
836841
# context for loading additional models
837842
with ExitStack() as exit_stack:
838843
# later should be smth like:
@@ -915,7 +920,7 @@ def _lora_loader() -> Iterator[Tuple[LoRAModelRaw, float]]:
915920
ExitStack() as exit_stack,
916921
unet_info.model_on_device() as (model_state_dict, unet),
917922
ModelPatcher.apply_freeu(unet, self.unet.freeu_config),
918-
set_seamless(unet, self.unet.seamless_axes), # FIXME
923+
SeamlessExt.static_patch_model(unet, self.unet.seamless_axes), # FIXME
919924
# Apply the LoRA after unet has been moved to its target device for faster patching.
920925
ModelPatcher.apply_lora_unet(
921926
unet,

invokeai/app/invocations/latents_to_image.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@
2424
from invokeai.app.invocations.model import VAEField
2525
from invokeai.app.invocations.primitives import ImageOutput
2626
from invokeai.app.services.shared.invocation_context import InvocationContext
27-
from invokeai.backend.stable_diffusion import set_seamless
27+
from invokeai.backend.stable_diffusion.extensions.seamless import SeamlessExt
2828
from invokeai.backend.stable_diffusion.vae_tiling import patch_vae_tiling_params
2929
from invokeai.backend.util.devices import TorchDevice
3030

@@ -59,7 +59,7 @@ def invoke(self, context: InvocationContext) -> ImageOutput:
5959

6060
vae_info = context.models.load(self.vae.vae)
6161
assert isinstance(vae_info.model, (AutoencoderKL, AutoencoderTiny))
62-
with set_seamless(vae_info.model, self.vae.seamless_axes), vae_info as vae:
62+
with SeamlessExt.static_patch_model(vae_info.model, self.vae.seamless_axes), vae_info as vae:
6363
assert isinstance(vae, (AutoencoderKL, AutoencoderTiny))
6464
latents = latents.to(vae.device)
6565
if self.fp32:

invokeai/backend/stable_diffusion/__init__.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -7,11 +7,9 @@
77
StableDiffusionGeneratorPipeline,
88
)
99
from invokeai.backend.stable_diffusion.diffusion import InvokeAIDiffuserComponent # noqa: F401
10-
from invokeai.backend.stable_diffusion.seamless import set_seamless # noqa: F401
1110

1211
__all__ = [
1312
"PipelineIntermediateState",
1413
"StableDiffusionGeneratorPipeline",
1514
"InvokeAIDiffuserComponent",
16-
"set_seamless",
1715
]
Lines changed: 71 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,71 @@
1+
from __future__ import annotations
2+
3+
from contextlib import contextmanager
4+
from typing import Callable, Dict, List, Optional, Tuple
5+
6+
import torch
7+
import torch.nn as nn
8+
from diffusers import UNet2DConditionModel
9+
from diffusers.models.lora import LoRACompatibleConv
10+
11+
from invokeai.backend.stable_diffusion.extensions.base import ExtensionBase
12+
13+
14+
class SeamlessExt(ExtensionBase):
    """Denoise extension that makes convolution padding wrap around ("seamless"
    / tileable output) along the requested axes.

    Every `torch.nn.Conv2d` in the patched model has its `_conv_forward`
    temporarily replaced so that padding along a seamless axis uses circular
    mode instead of the default zero padding, letting features at one edge
    blend into the opposite edge.
    """

    def __init__(
        self,
        seamless_axes: List[str],
    ):
        """
        Args:
            seamless_axes: axes to make tileable; any subset of ["x", "y"].
        """
        super().__init__()
        self._seamless_axes = seamless_axes

    @contextmanager
    def patch_unet(self, unet: UNet2DConditionModel, cached_weights: Optional[Dict[str, torch.Tensor]] = None):
        # `cached_weights` is accepted for interface compatibility but unused:
        # seamless patching swaps forward methods, not weights.
        with self.static_patch_model(
            model=unet,
            seamless_axes=self._seamless_axes,
        ):
            yield

    @staticmethod
    @contextmanager
    def static_patch_model(
        model: torch.nn.Module,
        seamless_axes: List[str],
    ):
        """Context manager: patch all Conv2d layers of `model` for seamless
        output along `seamless_axes`, restoring the original state on exit.

        Args:
            model: any module tree (UNet or VAE); only Conv2d leaves are touched.
            seamless_axes: axes to wrap; empty/falsy means no-op.
        """
        if not seamless_axes:
            # Nothing to patch — still behave as a context manager.
            yield
            return

        # "circular" wraps the tensor around that axis; "constant" keeps the
        # default zero padding for the non-seamless axis.
        x_mode = "circular" if "x" in seamless_axes else "constant"
        y_mode = "circular" if "y" in seamless_axes else "constant"

        # override conv_forward
        # https://github.com/huggingface/diffusers/issues/556#issuecomment-1993287019
        def _conv_forward_asymmetric(
            self, input: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor] = None
        ):
            # Pad each axis separately so x and y can use different modes.
            # FIX: padding tuples are computed as locals instead of being
            # assigned to `self.paddingX`/`self.paddingY` on every forward call
            # — nothing read those attributes and they leaked onto the layer.
            padding_x = (self._reversed_padding_repeated_twice[0], self._reversed_padding_repeated_twice[1], 0, 0)
            padding_y = (0, 0, self._reversed_padding_repeated_twice[2], self._reversed_padding_repeated_twice[3])
            working = torch.nn.functional.pad(input, padding_x, mode=x_mode)
            working = torch.nn.functional.pad(working, padding_y, mode=y_mode)
            # Padding was applied explicitly above, so the conv runs unpadded.
            return torch.nn.functional.conv2d(
                working, weight, bias, self.stride, torch.nn.modules.utils._pair(0), self.dilation, self.groups
            )

        # (layer, original _conv_forward, original lora_layer) for restoration.
        original_layers: List[Tuple[nn.Conv2d, Callable, Optional[Callable]]] = []
        try:
            for layer in model.modules():
                if not isinstance(layer, torch.nn.Conv2d):
                    continue

                orig_lora_layer = getattr(layer, "lora_layer", None)
                if isinstance(layer, LoRACompatibleConv) and layer.lora_layer is None:
                    # Force LoRACompatibleConv down the code path that calls
                    # `_conv_forward`; a zero LoRA contribution keeps the
                    # output numerically unchanged.
                    layer.lora_layer = lambda *x: 0
                original_layers.append((layer, layer._conv_forward, orig_lora_layer))
                layer._conv_forward = _conv_forward_asymmetric.__get__(layer, torch.nn.Conv2d)

            yield

        finally:
            # FIX: also restore `lora_layer` — previously the dummy lambda was
            # left on LoRACompatibleConv layers of the (cached, shared) model
            # after the context exited.
            for layer, orig_conv_forward, orig_lora_layer in original_layers:
                layer._conv_forward = orig_conv_forward
                if isinstance(layer, LoRACompatibleConv):
                    layer.lora_layer = orig_lora_layer

invokeai/backend/stable_diffusion/seamless.py

Lines changed: 0 additions & 51 deletions
This file was deleted.

0 commit comments

Comments
 (0)