1 parent 4428a6e commit e46ce79
sota-implementations/grpo/grpo_utils.py
@@ -51,9 +51,9 @@ def get_train_model(
     max_memory = {}
     for i in range(torch.cuda.device_count()):
         if i in train_devices:
-            max_memory[f"cuda:{i}"] = "24GiB"  # Allow max memory for devices we want to use
+            max_memory[i] = "24GiB"  # Allow max memory for devices we want to use
         else:
-            max_memory[f"cuda:{i}"] = "0GiB"  # No memory for other devices
+            max_memory[i] = "0GiB"  # No memory for other devices
     max_memory["cpu"] = "24GiB"  # Allow CPU memory as fallback
 
     # Let HF handle distribution with max_memory
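
Context for the change: Hugging Face's device_map="auto" loading consumes a max_memory budget dict whose GPU keys are plain integer device indices (plus a "cpu" key), which is why the f"cuda:{i}" string keys are replaced above. Below is a minimal sketch of how such a dict feeds into from_pretrained; the model id and the train_devices set are illustrative assumptions, not part of this commit.

# Sketch: build a per-device memory budget and let HF place the weights.
# Assumptions (not from this commit): model id and which GPUs are "train" GPUs.
import torch
from transformers import AutoModelForCausalLM

train_devices = {0}  # hypothetical set of GPUs reserved for training
max_memory = {}
for i in range(torch.cuda.device_count()):
    # Integer device indices, as expected by HF's max_memory format
    max_memory[i] = "24GiB" if i in train_devices else "0GiB"
max_memory["cpu"] = "24GiB"  # allow CPU memory as fallback

model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-3B",  # placeholder model id
    device_map="auto",  # HF distributes weights within the max_memory budget
    max_memory=max_memory,
)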