Description
Is there an existing issue for this problem?
- I have searched the existing issues
Operating system
Linux
GPU vendor
AMD (ROCm)
GPU model
AMD Radeon PRO V620
GPU VRAM
30GB
Version number
5.14.0
Browser
Firefox 139.0.1
Python dependencies
{
  "version": "5.14.0",
  "dependencies": {
    "accelerate": "1.7.0",
    "compel": "2.1.1",
    "cuda": null,
    "diffusers": "0.33.0",
    "numpy": "1.26.3",
    "opencv": "4.11.0.86",
    "onnx": "1.16.1",
    "pillow": "11.0.0",
    "python": "3.12.11",
    "torch": "2.7.1+rocm6.2.4",
    "torchvision": "0.22.1+rocm6.2.4",
    "transformers": "4.52.4",
    "xformers": null
  },
  "config": {
    "schema_version": "4.0.2",
    "legacy_models_yaml_path": null,
    "host": "0.0.0.0",
    "port": 9090,
    "allow_origins": [],
    "allow_credentials": true,
    "allow_methods": ["*"],
    "allow_headers": ["*"],
    "ssl_certfile": null,
    "ssl_keyfile": null,
    "log_tokenization": false,
    "patchmatch": true,
    "models_dir": "models",
    "convert_cache_dir": "models/.convert_cache",
    "download_cache_dir": "models/.download_cache",
    "legacy_conf_dir": "configs",
    "db_dir": "databases",
    "outputs_dir": "outputs",
    "custom_nodes_dir": "nodes",
    "style_presets_dir": "style_presets",
    "workflow_thumbnails_dir": "workflow_thumbnails",
    "log_handlers": ["console", "file=./output.log"],
    "log_format": "plain",
    "log_level": "info",
    "log_sql": false,
    "log_level_network": "warning",
    "use_memory_db": false,
    "dev_reload": false,
    "profile_graphs": false,
    "profile_prefix": null,
    "profiles_dir": "profiles",
    "max_cache_ram_gb": null,
    "max_cache_vram_gb": null,
    "log_memory_usage": false,
    "device_working_mem_gb": 6,
    "enable_partial_loading": false,
    "keep_ram_copy_of_weights": true,
    "ram": null,
    "vram": null,
    "lazy_offload": true,
    "pytorch_cuda_alloc_conf": null,
    "device": "cuda",
    "precision": "auto",
    "sequential_guidance": false,
    "attention_type": "auto",
    "attention_slice_size": "auto",
    "force_tiled_decode": false,
    "pil_compress_level": 1,
    "max_queue_size": 10000,
    "clear_queue_on_startup": false,
    "allow_nodes": null,
    "deny_nodes": null,
    "node_cache_size": 512,
    "hashing_algorithm": "blake3_single",
    "remote_api_tokens": [{"url_regex": "civitai.com", "token": "REDACTED"}],
    "scan_models_on_startup": false
  },
  "set_config_fields": [
    "remote_api_tokens", "device_working_mem_gb", "device", "log_format",
    "host", "legacy_models_yaml_path", "log_handlers"
  ]
}
What happened
RuntimeError: Failed to load config file /mnt/storage/invokeai/invokeai.yaml: 1 validation error for InvokeAIAppConfig
device
Input should be 'auto', 'cpu', 'cuda', 'cuda:1' or 'mps' [type=literal_error, input_value='cuda:3', input_type=str]
For further information visit https://errors.pydantic.dev/2.11/v/literal_error
I was trying to make InvokeAI use my 4th GPU, but the config validation rejects that device index outright.
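From the allowed values listed in the error, the field looks like a pydantic Literal. Here is a minimal sketch that reproduces the same literal_error; the field definition is my reconstruction from the error message, not the actual InvokeAIAppConfig code:

```python
# Hypothetical reconstruction of the device field, based only on the
# allowed values the error message lists.
from typing import Literal

from pydantic import BaseModel, ValidationError


class AppConfig(BaseModel):
    device: Literal["auto", "cpu", "cuda", "cuda:1", "mps"] = "auto"


try:
    AppConfig(device="cuda:3")
except ValidationError as err:
    print(err)  # 1 validation error ... literal_error, same as above
```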
What you expected to happen
The ability to set any device index (e.g. cuda:3), so machines with more than two GPUs can target all of their cards.
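One possible relaxation (a sketch under my assumptions, not InvokeAI's actual code) would be to validate the device string against a pattern instead of a fixed Literal, so any cuda:N index passes:

```python
# Sketch: accept "auto", "cpu", "mps", "cuda", or "cuda:<index>" for any index.
# Field and validator names here are illustrative, not from the InvokeAI codebase.
import re

from pydantic import BaseModel, field_validator

DEVICE_PATTERN = re.compile(r"^(auto|cpu|mps|cuda(:\d+)?)$")


class AppConfig(BaseModel):
    device: str = "auto"

    @field_validator("device")
    @classmethod
    def _validate_device(cls, value: str) -> str:
        if not DEVICE_PATTERN.fullmatch(value):
            raise ValueError(f"invalid device {value!r}")
        return value


print(AppConfig(device="cuda:3").device)  # cuda:3
```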
How to reproduce the problem
Set
device: cuda:3
in invokeai.yaml and start InvokeAI.
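For reference, PyTorch itself places no such limit on the index; any cuda:N is a valid device string as long as N is below torch.cuda.device_count():

```python
import torch

# "cuda:3" names the 4th GPU; constructing the device object does not even
# require CUDA to be available, only using it does.
dev = torch.device("cuda:3")
print(dev.type, dev.index)  # cuda 3
```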
Additional context
No response
Discord username
No response