1 parent e46ce79 commit c0b8623
sota-implementations/grpo/grpo_utils.py
```diff
@@ -162,9 +162,9 @@ def get_ref_model(
     max_memory = {}
     for i in range(torch.cuda.device_count()):
         if i in ref_devices:
-            max_memory[f"cuda:{i}"] = "24GiB"  # Allow max memory for devices we want to use
+            max_memory[i] = "24GiB"  # Allow max memory for devices we want to use
         else:
-            max_memory[f"cuda:{i}"] = "0GiB"  # No memory for other devices
+            max_memory[i] = "0GiB"  # No memory for other devices
     max_memory["cpu"] = "24GiB"  # Allow CPU memory as fallback

     # Let HF handle distribution with max_memory
```
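For context on why the key type matters: Hugging Face's `device_map="auto"` placement (via accelerate) expects GPU entries in `max_memory` to be keyed by integer device index (e.g. `0`), with `"cpu"` as a string key, so `f"cuda:{i}"` string keys were not being matched. Below is a minimal sketch of how the corrected dict is typically consumed; the checkpoint name and the `ref_devices` value are illustrative placeholders, not taken from this commit.

```python
# Minimal sketch (not the repo's full get_ref_model): build a max_memory map
# with integer GPU keys and let Hugging Face shard the model across them.
# The checkpoint id and ref_devices set are hypothetical placeholders.
import torch
from transformers import AutoModelForCausalLM

ref_devices = {1}  # hypothetical: GPU indices reserved for the reference model

max_memory = {}
for i in range(torch.cuda.device_count()):
    # accelerate expects plain integer indices for GPU keys, not "cuda:i" strings
    max_memory[i] = "24GiB" if i in ref_devices else "0GiB"
max_memory["cpu"] = "24GiB"  # allow CPU memory as a fallback

ref_model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-0.5B",   # illustrative model id
    device_map="auto",     # HF/accelerate decides weight placement
    max_memory=max_memory,
)
```

Giving non-reference GPUs a `"0GiB"` budget keeps the auto device map from placing any reference-model weights on devices reserved for other workers, while the CPU entry leaves an offload path if the allowed GPUs run out of room.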