
Commit 5ba85ba

heathen711 authored and psychedelicious committed
Update supported cuda slot input.
1 parent 911baeb commit 5ba85ba

File tree

3 files changed: +4 −6 lines

invokeai/app/services/config/config_default.py

Lines changed: 2 additions & 4 deletions
@@ -24,7 +24,6 @@
 INIT_FILE = Path("invokeai.yaml")
 DB_FILE = Path("invokeai.db")
 LEGACY_INIT_FILE = Path("invokeai.init")
-DEVICE = Literal["auto", "cpu", "cuda", "cuda:1", "mps"]
 PRECISION = Literal["auto", "float16", "bfloat16", "float32"]
 ATTENTION_TYPE = Literal["auto", "normal", "xformers", "sliced", "torch-sdp"]
 ATTENTION_SLICE_SIZE = Literal["auto", "balanced", "max", 1, 2, 3, 4, 5, 6, 7, 8]
@@ -47,7 +46,6 @@ def validate_url_regex(cls, v: str) -> str:
             raise ValueError(f"Invalid regex: {e}")
         return v
 
-
 class InvokeAIAppConfig(BaseSettings):
     """Invoke's global app configuration.
 
@@ -93,7 +91,7 @@ class InvokeAIAppConfig(BaseSettings):
         vram: DEPRECATED: This setting is no longer used. It has been replaced by `max_cache_vram_gb`, but most users will not need to use this config since automatic cache size limits should work well in most cases. This config setting will be removed once the new model cache behavior is stable.
         lazy_offload: DEPRECATED: This setting is no longer used. Lazy-offloading is enabled by default. This config setting will be removed once the new model cache behavior is stable.
         pytorch_cuda_alloc_conf: Configure the Torch CUDA memory allocator. This will impact peak reserved VRAM usage and performance. Setting to "backend:cudaMallocAsync" works well on many systems. The optimal configuration is highly dependent on the system configuration (device type, VRAM, CUDA driver version, etc.), so must be tuned experimentally.
-        device: Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.<br>Valid values: `auto`, `cpu`, `cuda`, `cuda:1`, `mps`
+        device: Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.<br>Valid values: `auto`, `cpu`, `cuda`, `mps`, `cuda:N` (where N is a device number)
         precision: Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.<br>Valid values: `auto`, `float16`, `bfloat16`, `float32`
         sequential_guidance: Whether to calculate guidance in serial instead of in parallel, lowering memory requirements.
         attention_type: Attention type.<br>Valid values: `auto`, `normal`, `xformers`, `sliced`, `torch-sdp`
@@ -176,7 +174,7 @@ class InvokeAIAppConfig(BaseSettings):
     pytorch_cuda_alloc_conf: Optional[str] = Field(default=None, description="Configure the Torch CUDA memory allocator. This will impact peak reserved VRAM usage and performance. Setting to \"backend:cudaMallocAsync\" works well on many systems. The optimal configuration is highly dependent on the system configuration (device type, VRAM, CUDA driver version, etc.), so must be tuned experimentally.")
 
     # DEVICE
-    device: DEVICE = Field(default="auto", description="Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.")
+    device: str = Field(default="auto", description="Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.", pattern=r"^(auto|cpu|mps|cuda(:\d+)?)$")
     precision: PRECISION = Field(default="auto", description="Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.")
 
     # GENERATION
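
The substantive change here is replacing the closed DEVICE Literal with a pattern-validated string, so any numeric CUDA index (cuda:0, cuda:4, ...) now validates rather than just cuda:1. A minimal sketch of the new behavior, assuming Pydantic v2; the DeviceConfig model below is illustrative, not from the repo:

from pydantic import BaseModel, Field, ValidationError

class DeviceConfig(BaseModel):
    # Same pattern as the new InvokeAIAppConfig.device field: a bare backend
    # name, or "cuda" with an optional numeric index.
    device: str = Field(default="auto", pattern=r"^(auto|cpu|mps|cuda(:\d+)?)$")

print(DeviceConfig(device="cuda:3").device)  # accepted; the old Literal allowed only "cuda:1"
print(DeviceConfig(device="cuda").device)    # bare "cuda" is still valid

try:
    DeviceConfig(device="cuda:x")  # rejected: the index must be numeric
except ValidationError:
    print("validation failed as expected")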

invokeai/frontend/web/src/services/api/schema.ts

Lines changed: 1 addition & 1 deletion
@@ -12270,7 +12270,7 @@ export type components = {
        * @default auto
        * @enum {string}
        */
-      device?: "auto" | "cpu" | "cuda" | "cuda:1" | "mps";
+      device?: "auto" | "cpu" | "cuda" | "cuda:1" | "cuda:2" | "cuda:3" | "mps";
       /**
        * Precision
        * @description Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.

tests/backend/util/test_devices.py

Lines changed: 1 addition & 1 deletion
@@ -10,7 +10,7 @@
 from invokeai.app.services.config import get_config
 from invokeai.backend.util.devices import TorchDevice, choose_precision, choose_torch_device, torch_dtype
 
-devices = ["cpu", "cuda:0", "cuda:1", "mps"]
+devices = ["cpu", "cuda:0", "cuda:1", "cuda:2", "mps"]
 device_types_cpu = [("cpu", torch.float32), ("cuda:0", torch.float32), ("mps", torch.float32)]
 device_types_cuda = [("cpu", torch.float32), ("cuda:0", torch.float16), ("mps", torch.float32)]
 device_types_mps = [("cpu", torch.float32), ("cuda:0", torch.float32), ("mps", torch.float16)]
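
A small aside (not part of the commit) on why adding cuda:2 to the parametrized devices list is safe even on single-GPU or CUDA-less test machines: constructing a torch.device only parses the string and does not touch the hardware, so the string round-trip can be checked anywhere:

import torch

# torch.device construction validates and parses the string but does not
# require the device to exist, so this runs on machines without CUDA or MPS.
for name in ["cpu", "cuda:0", "cuda:1", "cuda:2", "mps"]:
    device = torch.device(name)
    assert str(device) == name
    print(device.type, device.index)  # e.g. ("cuda", 2) for "cuda:2"; index is None for "cpu"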
