Skip to content

Commit de9f16a

Browse files
committed
update
1 parent 41aa0fc commit de9f16a

File tree

1 file changed

+1
-1
lines changed
  • src/compressed_tensors/quantization/utils/helpers.py

1 file changed

+1
-1
lines changed

src/compressed_tensors/quantization/utils/helpers.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -92,7 +92,7 @@ def calculate_qparams(
9292

9393
if global_scale is not None:
9494
# Conditionally scale the generated local scale by a global_scale
95-
scales = torch.clamp(
95+
scales = global_scale * torch.clamp(
9696
scales,
9797
max=torch.finfo(quantization_args.scale_dtype).max,
9898
min=torch.finfo(quantization_args.scale_dtype).min,

0 commit comments

Comments
 (0)