From 1519ecee2ebc637212027cfe538880d9c88e285c Mon Sep 17 00:00:00 2001 From: stevhliu Date: Thu, 12 Jun 2025 15:53:27 -0700 Subject: [PATCH 1/7] draft --- docs/source/en/_toctree.yml | 2 + docs/source/en/optimization/memory.md | 8 +- .../en/optimization/speed-memory-optims.md | 148 ++++++++++++++++++ 3 files changed, 154 insertions(+), 4 deletions(-) create mode 100644 docs/source/en/optimization/speed-memory-optims.md diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index 0530e11ac25e..12fd02129bd1 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -182,6 +182,8 @@ title: Reduce memory usage - local: optimization/pruna title: Pruna + - local: optimization/speed-memory-optims + title: Compile and offloading - local: optimization/xformers title: xFormers - local: optimization/tome diff --git a/docs/source/en/optimization/memory.md b/docs/source/en/optimization/memory.md index 6b853a7a084b..1d8fe0bed96f 100644 --- a/docs/source/en/optimization/memory.md +++ b/docs/source/en/optimization/memory.md @@ -17,7 +17,7 @@ Modern diffusion models like [Flux](../api/pipelines/flux) and [Wan](../api/pipe This guide will show you how to reduce your memory usage. > [!TIP] -> Keep in mind these techniques may need to be adjusted depending on the model! For example, a transformer-based diffusion model may not benefit equally from these inference speed optimizations as a UNet-based model. +> Keep in mind these techniques may need to be adjusted depending on the model. For example, a transformer-based diffusion model may not benefit equally from these memory optimizations as a UNet-based model. ## Multiple GPUs @@ -145,7 +145,7 @@ print(f"Max memory reserved: {torch.cuda.max_memory_allocated() / 1024**3:.2f} G ``` > [!WARNING] -> [`AutoencoderKLWan`] and [`AsymmetricAutoencoderKL`] don't support slicing. +> The [`AutoencoderKLWan`] and [`AsymmetricAutoencoderKL`] classes don't support slicing. ## VAE tiling @@ -219,7 +219,7 @@ from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained( "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16 ) -pipline.enable_model_cpu_offload() +pipeline.enable_model_cpu_offload() pipeline( prompt="An astronaut riding a horse on Mars", @@ -493,7 +493,7 @@ with torch.inference_mode(): ## Memory-efficient attention > [!TIP] -> Memory-efficient attention optimizes for memory usage *and* [inference speed](./fp16#scaled-dot-product-attention! +> Memory-efficient attention optimizes for memory usage *and* [inference speed](./fp16#scaled-dot-product-attention)! The Transformers attention mechanism is memory-intensive, especially for long sequences, so you can try using different and more memory-efficient attention types. diff --git a/docs/source/en/optimization/speed-memory-optims.md b/docs/source/en/optimization/speed-memory-optims.md new file mode 100644 index 000000000000..f825c8d0eeb1 --- /dev/null +++ b/docs/source/en/optimization/speed-memory-optims.md @@ -0,0 +1,148 @@ + + +# Compile and offloading + +There are trade-offs associated with optimizing solely for [inference speed](./fp16) or [memory-usage](./memory). For example, [caching](./cache) increases inference speed but requires more memory to store the intermediate outputs from the attention layers. + +If your hardware is sufficiently powerful, you can choose to focus on one or the other. For a more balanced approach that doesn't sacrifice too much in terms of inference speed and memory-usage, try compiling and offloading a model. 
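The snippet below is a minimal sketch of the combined idea, using the FLUX.1-schnell checkpoint from the memory guide as a stand-in. The rest of this guide layers quantization on top and walks through each offloading option in more detail.

```py
# Minimal sketch: offload the pipeline to keep memory low, then compile the
# transformer (the most compute-heavy component) to speed up the denoising loop.
# The checkpoint and settings here are placeholders, not a recommendation.
import torch
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)

# offloading: components are moved to the GPU only while they are needed
pipeline.enable_model_cpu_offload()

# compilation: compile the transformer in place
pipeline.transformer.compile()

image = pipeline("An astronaut riding a horse on Mars").images[0]
```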
+ +Refer to the table below for the latency and memory-usage of each combination. + +| combination | latency | memory usage | +|---|---|---| +| quantization, torch.compile | | | +| quantization, torch.compile, model CPU offloading | | | +| quantization, torch.compile, group offloading | | | + +This guide will show you how to compile and offload a model to improve both inference speed and memory-usage. + +## Quantization and torch.compile + +> [!TIP] +> The quantization backend, such as [bitsandbytes](../quantization/bitsandbytes#torchcompile), must be compatible with torch.compile. Refer to the quantization [overview](https://huggingface.co/docs/transformers/quantization/overview#overview) table to see which backends support torch.compile. + +Start by [quantizing](../quantization/overview) a model to reduce the memory required to store it and [compiling](./fp16#torchcompile) it to accelerate inference. + +```py +import torch +from diffusers import DiffusionPipeline +from diffusers.quantizers import PipelineQuantizationConfig + +# quantize +pipeline_quant_config = PipelineQuantizationConfig( + quant_backend="bitsandbytes_4bit", + quant_kwargs={"load_in_4bit": True, "bnb_4bit_quant_type": "nf4", "bnb_4bit_compute_dtype": torch.bfloat16}, + components_to_quantize=["transformer", "text_encoder_2"], +) +pipeline = DiffusionPipeline.from_pretrained( + "black-forest-labs/FLUX.1-dev", + quantization_config=pipeline_quant_config, + torch_dtype=torch.bfloat16, +).to("cuda") + +# compile +pipeline.transformer.to(memory_format=torch.channels_last) +pipeline.transformer = torch.compile( + pipeline.transformer, mode="ax-autotune", fullgraph=True +) +pipeline(""" + cinematic film still of a cat sipping a margarita in a pool in Palm Springs, California + highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain +""" +).images[0] +``` + +## Quantization, torch.compile, and offloading + +In addition to quantization and torch.compile, try offloading if you need to reduce memory-usage further. Offloading moves various layers or model components from the CPU to the GPU as needed for computations. + + + + +[Model CPU offloading](./memory#model-offloading) moves an individual pipeline component, like the transformer model, to the GPU when it is needed for computation. Otherwise, it is offloaded to the CPU. 
+ +```py +import torch +from diffusers import DiffusionPipeline +from diffusers.quantizers import PipelineQuantizationConfig + +# quantize +pipeline_quant_config = PipelineQuantizationConfig( + quant_backend="bitsandbytes_4bit", + quant_kwargs={"load_in_4bit": True, "bnb_4bit_quant_type": "nf4", "bnb_4bit_compute_dtype": torch.bfloat16}, + components_to_quantize=["transformer", "text_encoder_2"], +) +pipeline = DiffusionPipeline.from_pretrained( + "black-forest-labs/FLUX.1-dev", + quantization_config=pipeline_quant_config, + torch_dtype=torch.bfloat16, +).to("cuda") + +# model CPU offloading +pipeline.enable_model_cpu_offload() + +# compile +pipeline.transformer.to(memory_format=torch.channels_last) +pipeline.transformer = torch.compile( + pipeline.transformer, mode="ax-autotune", fullgraph=True +) +pipeline( + "cinematic film still of a cat sipping a margarita in a pool in Palm Springs, California, highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain" +).images[0] +``` + + + + +[Group offloading](./memory#group-offloading) moves the internal layers of an individual pipeline component, like the transformer model, to the GPU for computation and offloads it when it's not required. At the same time, it uses the [CUDA stream](./memory#cuda-stream) feature to prefetch the next layer for execution. + +By overlapping computation and data transfer, it is faster than model CPU offloading while also saving memory. + +```py +import torch +from diffusers import DiffusionPipeline +from diffusers.hooks import apply_group_offloading +from diffusers.quantizers import PipelineQuantizationConfig + +# quantize +pipeline_quant_config = PipelineQuantizationConfig( + quant_backend="bitsandbytes_4bit", + quant_kwargs={"load_in_4bit": True, "bnb_4bit_quant_type": "nf4", "bnb_4bit_compute_dtype": torch.bfloat16}, + components_to_quantize=["transformer", "text_encoder_2"], +) +pipeline = DiffusionPipeline.from_pretrained( + "black-forest-labs/FLUX.1-dev", + quantization_config=pipeline_quant_config, + torch_dtype=torch.bfloat16, +).to("cuda") + +# group offloading +onload_device = torch.device("cuda") +offload_device = torch.device("cpu") + +pipeline.transformer.enable_group_offload(onload_device=onload_device, offload_device=offload_device, offload_type="leaf_level", use_stream=True) +pipeline.vae.enable_group_offload(onload_device=onload_device, offload_type="leaf_level", use_stream=True) +apply_group_offloading(pipeline.text_encoder, onload_device=onload_device, offload_type="block_level", num_blocks_per_group=1, use_stream=True) + +# compile +pipeline.transformer.to(memory_format=torch.channels_last) +pipeline.transformer = torch.compile( + pipeline.transformer, mode="ax-autotune", fullgraph=True +) +pipeline( + "cinematic film still of a cat sipping a margarita in a pool in Palm Springs, California, highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain" +).images[0] +``` + + + \ No newline at end of file From 4a35656c3b5eb007291582273f9f78ebbc739bf3 Mon Sep 17 00:00:00 2001 From: stevhliu Date: Fri, 13 Jun 2025 15:07:37 -0700 Subject: [PATCH 2/7] feedback --- .../en/optimization/speed-memory-optims.md | 27 ++++++++----------- 1 file changed, 11 insertions(+), 16 deletions(-) diff --git a/docs/source/en/optimization/speed-memory-optims.md b/docs/source/en/optimization/speed-memory-optims.md index f825c8d0eeb1..e15ca3d7ea5c 100644 --- a/docs/source/en/optimization/speed-memory-optims.md +++ 
b/docs/source/en/optimization/speed-memory-optims.md @@ -12,26 +12,26 @@ specific language governing permissions and limitations under the License. # Compile and offloading -There are trade-offs associated with optimizing solely for [inference speed](./fp16) or [memory-usage](./memory). For example, [caching](./cache) increases inference speed but requires more memory to store the intermediate outputs from the attention layers. +When optimizing models, you often face trade-offs between [inference speed](./fp16) and [memory-usage](./memory). For instance, while [caching](./cache) can boost inference speed, it comes at the cost of increased memory consumption since it needs to store intermediate attention layer outputs. -If your hardware is sufficiently powerful, you can choose to focus on one or the other. For a more balanced approach that doesn't sacrifice too much in terms of inference speed and memory-usage, try compiling and offloading a model. +A more balanced optimization strategy combines [torch.compile](./fp16#torchcompile) with various offloading methods. This approach not only accelerates inference but also helps lower memory-usage. -Refer to the table below for the latency and memory-usage of each combination. +The table below provides a comparison of optimization strategy combinations and their impact on latency and memory-usage. -| combination | latency | memory usage | +| combination | latency | memory-usage | |---|---|---| | quantization, torch.compile | | | | quantization, torch.compile, model CPU offloading | | | | quantization, torch.compile, group offloading | | | -This guide will show you how to compile and offload a model to improve both inference speed and memory-usage. +This guide will show you how to compile and offload a model. ## Quantization and torch.compile > [!TIP] > The quantization backend, such as [bitsandbytes](../quantization/bitsandbytes#torchcompile), must be compatible with torch.compile. Refer to the quantization [overview](https://huggingface.co/docs/transformers/quantization/overview#overview) table to see which backends support torch.compile. -Start by [quantizing](../quantization/overview) a model to reduce the memory required to store it and [compiling](./fp16#torchcompile) it to accelerate inference. +Start by [quantizing](../quantization/overview) a model to reduce the memory required for storage and [compiling](./fp16#torchcompile) it to accelerate inference. 
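If you want to confirm the storage savings on your own setup, a rough check is to sum the parameter sizes of the quantized component. This is a hand-rolled helper rather than a diffusers API, and it only counts weights, not activations.

```py
# Rough, illustrative estimate of how much memory a component's weights occupy.
def param_size_gb(module):
    return sum(p.numel() * p.element_size() for p in module.parameters()) / 1024**3

# after creating the quantized pipeline in the example below, for instance:
# print(f"transformer weights: {param_size_gb(pipeline.transformer):.2f} GB")
```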
```py import torch @@ -52,9 +52,7 @@ pipeline = DiffusionPipeline.from_pretrained( # compile pipeline.transformer.to(memory_format=torch.channels_last) -pipeline.transformer = torch.compile( - pipeline.transformer, mode="ax-autotune", fullgraph=True -) +pipeline.transformer.compile( mode="max-autotune", fullgraph=True) pipeline(""" cinematic film still of a cat sipping a margarita in a pool in Palm Springs, California highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain @@ -93,9 +91,7 @@ pipeline.enable_model_cpu_offload() # compile pipeline.transformer.to(memory_format=torch.channels_last) -pipeline.transformer = torch.compile( - pipeline.transformer, mode="ax-autotune", fullgraph=True -) +pipeline.transformer.compile( mode="max-autotune", fullgraph=True) pipeline( "cinematic film still of a cat sipping a margarita in a pool in Palm Springs, California, highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain" ).images[0] @@ -132,13 +128,12 @@ offload_device = torch.device("cpu") pipeline.transformer.enable_group_offload(onload_device=onload_device, offload_device=offload_device, offload_type="leaf_level", use_stream=True) pipeline.vae.enable_group_offload(onload_device=onload_device, offload_type="leaf_level", use_stream=True) -apply_group_offloading(pipeline.text_encoder, onload_device=onload_device, offload_type="block_level", num_blocks_per_group=1, use_stream=True) +apply_group_offloading(pipeline.text_encoder, onload_device=onload_device, offload_type="leaf_level", use_stream=True) +apply_group_offloading(pipeline.text_encoder_2, onload_device=onload_device, offload_type="leaf_level", use_stream=True) # compile pipeline.transformer.to(memory_format=torch.channels_last) -pipeline.transformer = torch.compile( - pipeline.transformer, mode="ax-autotune", fullgraph=True -) +pipeline.transformer.compile( mode="max-autotune", fullgraph=True) pipeline( "cinematic film still of a cat sipping a margarita in a pool in Palm Springs, California, highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain" ).images[0] From d834db7a7d294aa0a273a75c6a9d0b75cc7adbd8 Mon Sep 17 00:00:00 2001 From: stevhliu Date: Mon, 16 Jun 2025 11:44:05 -0700 Subject: [PATCH 3/7] update --- docs/source/en/_toctree.yml | 2 +- docs/source/en/optimization/memory.md | 21 ++++++++-------- .../en/optimization/speed-memory-optims.md | 25 +++++++++++++------ 3 files changed, 28 insertions(+), 20 deletions(-) diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index 12fd02129bd1..37f4dcc01e41 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -183,7 +183,7 @@ - local: optimization/pruna title: Pruna - local: optimization/speed-memory-optims - title: Compile and offloading + title: Compile and offloading quantized models - local: optimization/xformers title: xFormers - local: optimization/tome diff --git a/docs/source/en/optimization/memory.md b/docs/source/en/optimization/memory.md index 1d8fe0bed96f..f19a803a7ee6 100644 --- a/docs/source/en/optimization/memory.md +++ b/docs/source/en/optimization/memory.md @@ -172,7 +172,13 @@ print(f"Max memory reserved: {torch.cuda.max_memory_allocated() / 1024**3:.2f} G > [!WARNING] > [`AutoencoderKLWan`] and [`AsymmetricAutoencoderKL`] don't support tiling. -## CPU offloading +## Offloading + +Offloading strategies move not currently active layers or models to the CPU to avoid increasing GPU memory. 
These strategies can be combined with quantization and torch.compile to balance inference speed and memory usage. + +Refer to the [Compile and offloading quantized models](./speed-memory-optims) guide for more details. + +### CPU offloading CPU offloading selectively moves weights from the GPU to the CPU. When a component is required, it is transferred to the GPU and when it isn't required, it is moved to the CPU. This method works on submodules rather than whole models. It saves memory by avoiding storing the entire model on the GPU. @@ -203,7 +209,7 @@ pipeline( print(f"Max memory reserved: {torch.cuda.max_memory_allocated() / 1024**3:.2f} GB") ``` -## Model offloading +### Model offloading Model offloading moves entire models to the GPU instead of selectively moving *some* layers or model components. One of the main pipeline models, usually the text encoder, UNet, and VAE, is placed on the GPU while the other components are held on the CPU. Components like the UNet that run multiple times stays on the GPU until its completely finished and no longer needed. This eliminates the communication overhead of [CPU offloading](#cpu-offloading) and makes model offloading a faster alternative. The tradeoff is memory savings won't be as large. @@ -234,7 +240,7 @@ print(f"Max memory reserved: {torch.cuda.max_memory_allocated() / 1024**3:.2f} G [`~DiffusionPipeline.enable_model_cpu_offload`] also helps when you're using the [`~StableDiffusionXLPipeline.encode_prompt`] method on its own to generate the text encoders hidden state. -## Group offloading +### Group offloading Group offloading moves groups of internal layers ([torch.nn.ModuleList](https://pytorch.org/docs/stable/generated/torch.nn.ModuleList.html) or [torch.nn.Sequential](https://pytorch.org/docs/stable/generated/torch.nn.Sequential.html)) to the CPU. It uses less memory than [model offloading](#model-offloading) and it is faster than [CPU offloading](#cpu-offloading) because it reduces communication overhead. @@ -278,7 +284,7 @@ print(f"Max memory reserved: {torch.cuda.max_memory_allocated() / 1024**3:.2f} G export_to_video(video, "output.mp4", fps=8) ``` -### CUDA stream +#### CUDA stream The `use_stream` parameter can be activated for CUDA devices that support asynchronous data transfer streams to reduce overall execution time compared to [CPU offloading](#cpu-offloading). It overlaps data transfer and computation by using layer prefetching. The next layer to be executed is loaded onto the GPU while the current layer is still being executed. It can increase CPU memory significantly so ensure you have 2x the amount of memory as the model size. @@ -295,13 +301,6 @@ pipeline.transformer.enable_group_offload(onload_device=onload_device, offload_d The `low_cpu_mem_usage` parameter can be set to `True` to reduce CPU memory usage when using streams during group offloading. It is best for `leaf_level` offloading and when CPU memory is bottlenecked. Memory is saved by creating pinned tensors on the fly instead of pre-pinning them. However, this may increase overall execution time. - - -The offloading strategies can be combined with [quantization](../quantization/overview.md) to enable further memory savings. For image generation, combining [quantization and model offloading](#model-offloading) can often give the best trade-off between quality, speed, and memory. However, for video generation, as the models are more -compute-bound, [group-offloading](#group-offloading) tends to be better. 
Group offloading provides considerable benefits when weight transfers can be overlapped with computation (must use streams). When applying group offloading with quantization on image generation models at typical resolutions (1024x1024, for example), it is usually not possible to *fully* overlap weight transfers if the compute kernel finishes faster, making it communication bound between CPU/GPU (due to device synchronizations). - - - ## Layerwise casting Layerwise casting stores weights in a smaller data format (for example, `torch.float8_e4m3fn` and `torch.float8_e5m2`) to use less memory and upcasts those weights to a higher precision like `torch.float16` or `torch.bfloat16` for computation. Certain layers (normalization and modulation related weights) are skipped because storing them in fp8 can degrade generation quality. diff --git a/docs/source/en/optimization/speed-memory-optims.md b/docs/source/en/optimization/speed-memory-optims.md index e15ca3d7ea5c..57d8b4fca878 100644 --- a/docs/source/en/optimization/speed-memory-optims.md +++ b/docs/source/en/optimization/speed-memory-optims.md @@ -10,21 +10,30 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o specific language governing permissions and limitations under the License. --> -# Compile and offloading +# Compile and offloading quantized models When optimizing models, you often face trade-offs between [inference speed](./fp16) and [memory-usage](./memory). For instance, while [caching](./cache) can boost inference speed, it comes at the cost of increased memory consumption since it needs to store intermediate attention layer outputs. -A more balanced optimization strategy combines [torch.compile](./fp16#torchcompile) with various offloading methods. This approach not only accelerates inference but also helps lower memory-usage. +A more balanced optimization strategy combines [torch.compile](./fp16#torchcompile) with various [offloading methods](./memory#offloading) on a quantized model. This approach not only accelerates inference but also helps lower memory-usage. -The table below provides a comparison of optimization strategy combinations and their impact on latency and memory-usage. +For image generation, combining quantization and [model offloading](./memory#model-offloading) can often give the best trade-off between quality, speed, and memory. Group offloading is not as effective because it is usually not possible to *fully* overlap data transfer if the compute kernel finishes faster. This results in some communication overhead between the CPU and GPU. -| combination | latency | memory-usage | +For video generation, combining quantization and [group-offloading](./memory#group-offloading) tends to be better because video models are more compute-bound. + +The table below provides a comparison of optimization strategy combinations and their impact on latency and memory-usage for Flux. + +| combination | latency (s) | memory-usage (GB) | |---|---|---| -| quantization, torch.compile | | | -| quantization, torch.compile, model CPU offloading | | | -| quantization, torch.compile, group offloading | | | +| quantization | 32.602 | 14.9453 | +| quantization, torch.compile | 25.847 | 14.9448 | +| quantization, torch.compile, model CPU offloading | 32.312 | 12.2369 | +| quantization, torch.compile, group offloading | 60.235 | 12.2369 | +These results are benchmarked on Flux with a RTX 4090. The `transformer` and `text_encoder_2` components are quantized. 
Refer to the [benchmarking script](https://gist.github.com/sayakpaul/0db9d8eeeb3d2a0e5ed7cf0d9ca19b7d) if you're interested in evaluating your own model. + +> [!TIP] +> We recommend installing [PyTorch nightly](https://pytorch.org/get-started/locally/) for better torch.compile support. -This guide will show you how to compile and offload a model. +This guide will show you how to compile and offload a quantized model. ## Quantization and torch.compile From 971411d04b7d4cf2d1f17b65b1647356862992ac Mon Sep 17 00:00:00 2001 From: stevhliu Date: Tue, 17 Jun 2025 13:37:38 -0700 Subject: [PATCH 4/7] feedback --- .../en/optimization/speed-memory-optims.md | 93 ++++++++++++++----- 1 file changed, 70 insertions(+), 23 deletions(-) diff --git a/docs/source/en/optimization/speed-memory-optims.md b/docs/source/en/optimization/speed-memory-optims.md index 57d8b4fca878..fd4c81b1a98b 100644 --- a/docs/source/en/optimization/speed-memory-optims.md +++ b/docs/source/en/optimization/speed-memory-optims.md @@ -14,39 +14,41 @@ specific language governing permissions and limitations under the License. When optimizing models, you often face trade-offs between [inference speed](./fp16) and [memory-usage](./memory). For instance, while [caching](./cache) can boost inference speed, it comes at the cost of increased memory consumption since it needs to store intermediate attention layer outputs. -A more balanced optimization strategy combines [torch.compile](./fp16#torchcompile) with various [offloading methods](./memory#offloading) on a quantized model. This approach not only accelerates inference but also helps lower memory-usage. +A more balanced optimization strategy combines quantizing a model, [torch.compile](./fp16#torchcompile) and various [offloading methods](./memory#offloading). This approach not only accelerates inference but also helps lower memory-usage. For image generation, combining quantization and [model offloading](./memory#model-offloading) can often give the best trade-off between quality, speed, and memory. Group offloading is not as effective because it is usually not possible to *fully* overlap data transfer if the compute kernel finishes faster. This results in some communication overhead between the CPU and GPU. For video generation, combining quantization and [group-offloading](./memory#group-offloading) tends to be better because video models are more compute-bound. -The table below provides a comparison of optimization strategy combinations and their impact on latency and memory-usage for Flux. +The table below provides a comparison of optimization strategy combinations and their impact on latency and memory-usage for Flux and Wan. | combination | latency (s) | memory-usage (GB) | |---|---|---| -| quantization | 32.602 | 14.9453 | -| quantization, torch.compile | 25.847 | 14.9448 | -| quantization, torch.compile, model CPU offloading | 32.312 | 12.2369 | -| quantization, torch.compile, group offloading | 60.235 | 12.2369 | -These results are benchmarked on Flux with a RTX 4090. The `transformer` and `text_encoder_2` components are quantized. Refer to the [benchmarking script](https://gist.github.com/sayakpaul/0db9d8eeeb3d2a0e5ed7cf0d9ca19b7d) if you're interested in evaluating your own model. 
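If you prefer to take quick measurements inline, a loop along these lines is usually enough. It is a rough sketch, the prompt and step count are placeholders, and the benchmarking script referenced here is more thorough.

```py
# Rough latency and peak-memory measurement for an already configured pipeline (illustrative).
import time
import torch

def benchmark(pipeline, prompt, num_runs=3):
    torch.cuda.reset_peak_memory_stats()
    pipeline(prompt, num_inference_steps=28)  # warmup so compilation time isn't measured
    torch.cuda.synchronize()
    start = time.perf_counter()
    for _ in range(num_runs):
        pipeline(prompt, num_inference_steps=28)
    torch.cuda.synchronize()
    print(f"latency: {(time.perf_counter() - start) / num_runs:.3f}s")
    print(f"peak memory: {torch.cuda.max_memory_allocated() / 1024**3:.4f} GB")
```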
+| quantization (Flux) | 32.602 | 14.9453 | +| quantization, torch.compile (Flux) | 25.847 | 14.9448 | +| quantization, torch.compile, model CPU offloading (Flux) | 32.312 | 12.2369 | +| quantization, torch.compile, group offloading (Wan) | | | +These results are benchmarked on Flux and Wan with a RTX 4090. The `transformer` and `text_encoder` components are quantized. Refer to the if you're interested in evaluating your own model. -> [!TIP] -> We recommend installing [PyTorch nightly](https://pytorch.org/get-started/locally/) for better torch.compile support. +This guide will show you how to compile and offload a quantized model with [bitsandbytes](../quantization/bitsandbytes#torchcompile). Make sure you are using [PyTorch nightly](https://pytorch.org/get-started/locally/) and the latest version of bitsandbytes. -This guide will show you how to compile and offload a quantized model. +```bash +pip install -U bitsandbytes +``` ## Quantization and torch.compile -> [!TIP] -> The quantization backend, such as [bitsandbytes](../quantization/bitsandbytes#torchcompile), must be compatible with torch.compile. Refer to the quantization [overview](https://huggingface.co/docs/transformers/quantization/overview#overview) table to see which backends support torch.compile. - Start by [quantizing](../quantization/overview) a model to reduce the memory required for storage and [compiling](./fp16#torchcompile) it to accelerate inference. +Configure the [Dynamo](https://docs.pytorch.org/docs/stable/torch.compiler_dynamo_overview.html) cache size to allow recompiling up to a limit in case some guards fail. + ```py import torch from diffusers import DiffusionPipeline from diffusers.quantizers import PipelineQuantizationConfig +torch._dynamo.config.cache_size_limit = 1000 + # quantize pipeline_quant_config = PipelineQuantizationConfig( quant_backend="bitsandbytes_4bit", @@ -73,6 +75,8 @@ pipeline(""" In addition to quantization and torch.compile, try offloading if you need to reduce memory-usage further. Offloading moves various layers or model components from the CPU to the GPU as needed for computations. +Configure the [Dynamo](https://docs.pytorch.org/docs/stable/torch.compiler_dynamo_overview.html) cache size to allow recompiling up to a limit in case some guards fail. + @@ -83,6 +87,8 @@ import torch from diffusers import DiffusionPipeline from diffusers.quantizers import PipelineQuantizationConfig +torch._dynamo.config.cache_size_limit = 1000 + # quantize pipeline_quant_config = PipelineQuantizationConfig( quant_backend="bitsandbytes_4bit", @@ -114,19 +120,28 @@ pipeline( By overlapping computation and data transfer, it is faster than model CPU offloading while also saving memory. 
```py +# pip install ftfy import torch -from diffusers import DiffusionPipeline +from diffusers import AutoModel, DiffusionPipeline from diffusers.hooks import apply_group_offloading +from diffusers.utils import export_to_video from diffusers.quantizers import PipelineQuantizationConfig +from transformers import UMT5EncoderModel + +torch._dynamo.config.cache_size_limit = 1000 # quantize pipeline_quant_config = PipelineQuantizationConfig( quant_backend="bitsandbytes_4bit", quant_kwargs={"load_in_4bit": True, "bnb_4bit_quant_type": "nf4", "bnb_4bit_compute_dtype": torch.bfloat16}, - components_to_quantize=["transformer", "text_encoder_2"], + components_to_quantize=["transformer", "text_encoder"], +) + +text_encoder = UMT5EncoderModel.from_pretrained( + "Wan-AI/Wan2.1-T2V-14B-Diffusers", subfolder="text_encoder", torch_dtype=torch.bfloat16 ) pipeline = DiffusionPipeline.from_pretrained( - "black-forest-labs/FLUX.1-dev", + "Wan-AI/Wan2.1-T2V-14B-Diffusers", quantization_config=pipeline_quant_config, torch_dtype=torch.bfloat16, ).to("cuda") @@ -135,17 +150,49 @@ pipeline = DiffusionPipeline.from_pretrained( onload_device = torch.device("cuda") offload_device = torch.device("cpu") -pipeline.transformer.enable_group_offload(onload_device=onload_device, offload_device=offload_device, offload_type="leaf_level", use_stream=True) -pipeline.vae.enable_group_offload(onload_device=onload_device, offload_type="leaf_level", use_stream=True) -apply_group_offloading(pipeline.text_encoder, onload_device=onload_device, offload_type="leaf_level", use_stream=True) -apply_group_offloading(pipeline.text_encoder_2, onload_device=onload_device, offload_type="leaf_level", use_stream=True) +pipeline.transformer.enable_group_offload( + onload_device=onload_device, + offload_device=offload_device, + offload_type="block_level", + num_blocks_per_group=4 +) +pipeline.vae.enable_group_offload( + onload_device=onload_device, + offload_device=offload_device, + offload_type="block_level", + num_blocks_per_group=4 +) +apply_group_offloading( + pipeline.text_encoder, + onload_device=onload_device, + offload_type="block_level", + num_blocks_per_group=2 +) # compile pipeline.transformer.to(memory_format=torch.channels_last) pipeline.transformer.compile( mode="max-autotune", fullgraph=True) -pipeline( - "cinematic film still of a cat sipping a margarita in a pool in Palm Springs, California, highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain" -).images[0] + +prompt = """ +The camera rushes from far to near in a low-angle shot, +revealing a white ferret on a log. It plays, leaps into the water, and emerges, as the camera zooms in +for a close-up. Water splashes berry bushes nearby, while moss, snow, and leaves blanket the ground. +Birch trees and a light blue sky frame the scene, with ferns in the foreground. Side lighting casts dynamic +shadows and warm highlights. Medium composition, front view, low angle, with depth of field. 
+""" +negative_prompt = """ +Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, +low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, +misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards +""" + +output = pipeline( + prompt=prompt, + negative_prompt=negative_prompt, + num_frames=81, + guidance_scale=5.0, +).frames[0] +export_to_video(output, "output.mp4", fps=16) ``` From b483f2456171e720b1142a2d1669aabfab8fa54b Mon Sep 17 00:00:00 2001 From: stevhliu Date: Tue, 17 Jun 2025 13:39:06 -0700 Subject: [PATCH 5/7] fix --- docs/source/en/_toctree.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index 37f4dcc01e41..283efeef72c1 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -180,10 +180,10 @@ title: Caching - local: optimization/memory title: Reduce memory usage - - local: optimization/pruna - title: Pruna - local: optimization/speed-memory-optims title: Compile and offloading quantized models + - local: optimization/pruna + title: Pruna - local: optimization/xformers title: xFormers - local: optimization/tome From b5d5e99c8b991a8d54e8bd38d134a0a647340c20 Mon Sep 17 00:00:00 2001 From: stevhliu Date: Wed, 18 Jun 2025 10:09:51 -0700 Subject: [PATCH 6/7] feedback --- docs/source/en/optimization/memory.md | 3 ++ .../en/optimization/speed-memory-optims.md | 36 ++++++++++--------- 2 files changed, 22 insertions(+), 17 deletions(-) diff --git a/docs/source/en/optimization/memory.md b/docs/source/en/optimization/memory.md index f19a803a7ee6..4ecddf10e878 100644 --- a/docs/source/en/optimization/memory.md +++ b/docs/source/en/optimization/memory.md @@ -303,6 +303,9 @@ The `low_cpu_mem_usage` parameter can be set to `True` to reduce CPU memory usag ## Layerwise casting +> [!TIP] +> Combine layerwise casting with [group offloading](#group-offloading) for even more memory savings. + Layerwise casting stores weights in a smaller data format (for example, `torch.float8_e4m3fn` and `torch.float8_e5m2`) to use less memory and upcasts those weights to a higher precision like `torch.float16` or `torch.bfloat16` for computation. Certain layers (normalization and modulation related weights) are skipped because storing them in fp8 can degrade generation quality. > [!WARNING] diff --git a/docs/source/en/optimization/speed-memory-optims.md b/docs/source/en/optimization/speed-memory-optims.md index fd4c81b1a98b..8271929b27c8 100644 --- a/docs/source/en/optimization/speed-memory-optims.md +++ b/docs/source/en/optimization/speed-memory-optims.md @@ -20,15 +20,14 @@ For image generation, combining quantization and [model offloading](./memory#mod For video generation, combining quantization and [group-offloading](./memory#group-offloading) tends to be better because video models are more compute-bound. -The table below provides a comparison of optimization strategy combinations and their impact on latency and memory-usage for Flux and Wan. +The table below provides a comparison of optimization strategy combinations and their impact on latency and memory-usage for Flux. 
| combination | latency (s) | memory-usage (GB) | |---|---|---| -| quantization (Flux) | 32.602 | 14.9453 | -| quantization, torch.compile (Flux) | 25.847 | 14.9448 | -| quantization, torch.compile, model CPU offloading (Flux) | 32.312 | 12.2369 | -| quantization, torch.compile, group offloading (Wan) | | | -These results are benchmarked on Flux and Wan with a RTX 4090. The `transformer` and `text_encoder` components are quantized. Refer to the if you're interested in evaluating your own model. +| quantization | 32.602 | 14.9453 | +| quantization, torch.compile | 25.847 | 14.9448 | +| quantization, torch.compile, model CPU offloading | 32.312 | 12.2369 | +These results are benchmarked on Flux with a RTX 4090. The `transformer` and `text_encoder` components are quantized. Refer to the if you're interested in evaluating your own model. This guide will show you how to compile and offload a quantized model with [bitsandbytes](../quantization/bitsandbytes#torchcompile). Make sure you are using [PyTorch nightly](https://pytorch.org/get-started/locally/) and the latest version of bitsandbytes. @@ -40,14 +39,14 @@ pip install -U bitsandbytes Start by [quantizing](../quantization/overview) a model to reduce the memory required for storage and [compiling](./fp16#torchcompile) it to accelerate inference. -Configure the [Dynamo](https://docs.pytorch.org/docs/stable/torch.compiler_dynamo_overview.html) cache size to allow recompiling up to a limit in case some guards fail. +Configure the [Dynamo](https://docs.pytorch.org/docs/stable/torch.compiler_dynamo_overview.html) `capture_dynamic_output_shape_ops = True` to handle dynamic outputs when compiling bnb models with `fullgraph=True`. ```py import torch from diffusers import DiffusionPipeline from diffusers.quantizers import PipelineQuantizationConfig -torch._dynamo.config.cache_size_limit = 1000 +torch._dynamo.config.capture_dynamic_output_shape_ops = True # quantize pipeline_quant_config = PipelineQuantizationConfig( @@ -75,7 +74,7 @@ pipeline(""" In addition to quantization and torch.compile, try offloading if you need to reduce memory-usage further. Offloading moves various layers or model components from the CPU to the GPU as needed for computations. -Configure the [Dynamo](https://docs.pytorch.org/docs/stable/torch.compiler_dynamo_overview.html) cache size to allow recompiling up to a limit in case some guards fail. +Configure the [Dynamo](https://docs.pytorch.org/docs/stable/torch.compiler_dynamo_overview.html) `cache_size_limit` during offloading to avoid excessive recompilation. 
@@ -106,7 +105,7 @@ pipeline.enable_model_cpu_offload() # compile pipeline.transformer.to(memory_format=torch.channels_last) -pipeline.transformer.compile( mode="max-autotune", fullgraph=True) +pipeline.transformer.compile() pipeline( "cinematic film still of a cat sipping a margarita in a pool in Palm Springs, California, highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain" ).images[0] @@ -153,25 +152,28 @@ offload_device = torch.device("cpu") pipeline.transformer.enable_group_offload( onload_device=onload_device, offload_device=offload_device, - offload_type="block_level", - num_blocks_per_group=4 + offload_type="leaf_level", + use_stream=True, + non_blocking=True ) pipeline.vae.enable_group_offload( onload_device=onload_device, offload_device=offload_device, - offload_type="block_level", - num_blocks_per_group=4 + offload_type="leaf_level", + use_stream=True, + non_blocking=True ) apply_group_offloading( pipeline.text_encoder, onload_device=onload_device, - offload_type="block_level", - num_blocks_per_group=2 + offload_type="leaf_level", + use_stream=True, + non_blocking=True ) # compile pipeline.transformer.to(memory_format=torch.channels_last) -pipeline.transformer.compile( mode="max-autotune", fullgraph=True) +pipeline.transformer.compile() prompt = """ The camera rushes from far to near in a low-angle shot, From dc32d45ab80c8ff814c501e6c5e5ddd93fa6fc5a Mon Sep 17 00:00:00 2001 From: stevhliu Date: Wed, 18 Jun 2025 10:24:41 -0700 Subject: [PATCH 7/7] feedback --- docs/source/en/optimization/speed-memory-optims.md | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/docs/source/en/optimization/speed-memory-optims.md b/docs/source/en/optimization/speed-memory-optims.md index 8271929b27c8..825afca57b14 100644 --- a/docs/source/en/optimization/speed-memory-optims.md +++ b/docs/source/en/optimization/speed-memory-optims.md @@ -12,11 +12,9 @@ specific language governing permissions and limitations under the License. # Compile and offloading quantized models -When optimizing models, you often face trade-offs between [inference speed](./fp16) and [memory-usage](./memory). For instance, while [caching](./cache) can boost inference speed, it comes at the cost of increased memory consumption since it needs to store intermediate attention layer outputs. +Optimizing models often involves trade-offs between [inference speed](./fp16) and [memory-usage](./memory). For instance, while [caching](./cache) can boost inference speed, it also increases memory consumption since it needs to store the outputs of intermediate attention layers. A more balanced optimization strategy combines quantizing a model, [torch.compile](./fp16#torchcompile) and various [offloading methods](./memory#offloading). -A more balanced optimization strategy combines quantizing a model, [torch.compile](./fp16#torchcompile) and various [offloading methods](./memory#offloading). This approach not only accelerates inference but also helps lower memory-usage. - -For image generation, combining quantization and [model offloading](./memory#model-offloading) can often give the best trade-off between quality, speed, and memory. Group offloading is not as effective because it is usually not possible to *fully* overlap data transfer if the compute kernel finishes faster. This results in some communication overhead between the CPU and GPU. 
+For image generation, combining quantization and [model offloading](./memory#model-offloading) can often give the best trade-off between quality, speed, and memory. Group offloading is not as effective for image generation because it is usually not possible to *fully* overlap data transfer if the compute kernel finishes faster. This results in some communication overhead between the CPU and GPU. For video generation, combining quantization and [group-offloading](./memory#group-offloading) tends to be better because video models are more compute-bound. @@ -27,7 +25,7 @@ The table below provides a comparison of optimization strategy combinations and | quantization | 32.602 | 14.9453 | | quantization, torch.compile | 25.847 | 14.9448 | | quantization, torch.compile, model CPU offloading | 32.312 | 12.2369 | -These results are benchmarked on Flux with a RTX 4090. The `transformer` and `text_encoder` components are quantized. Refer to the if you're interested in evaluating your own model. +These results are benchmarked on Flux with a RTX 4090. The transformer and text_encoder components are quantized. Refer to the if you're interested in evaluating your own model. This guide will show you how to compile and offload a quantized model with [bitsandbytes](../quantization/bitsandbytes#torchcompile). Make sure you are using [PyTorch nightly](https://pytorch.org/get-started/locally/) and the latest version of bitsandbytes. @@ -39,7 +37,7 @@ pip install -U bitsandbytes Start by [quantizing](../quantization/overview) a model to reduce the memory required for storage and [compiling](./fp16#torchcompile) it to accelerate inference. -Configure the [Dynamo](https://docs.pytorch.org/docs/stable/torch.compiler_dynamo_overview.html) `capture_dynamic_output_shape_ops = True` to handle dynamic outputs when compiling bnb models with `fullgraph=True`. +Configure the [Dynamo](https://docs.pytorch.org/docs/stable/torch.compiler_dynamo_overview.html) `capture_dynamic_output_shape_ops = True` to handle dynamic outputs when compiling bitsandbytes models with `fullgraph=True`. ```py import torch @@ -104,7 +102,6 @@ pipeline = DiffusionPipeline.from_pretrained( pipeline.enable_model_cpu_offload() # compile -pipeline.transformer.to(memory_format=torch.channels_last) pipeline.transformer.compile() pipeline( "cinematic film still of a cat sipping a margarita in a pool in Palm Springs, California, highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain" @@ -172,7 +169,6 @@ apply_group_offloading( ) # compile -pipeline.transformer.to(memory_format=torch.channels_last) pipeline.transformer.compile() prompt = """