We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 41aa0fc · commit de9f16a — Copy full SHA for de9f16a
src/compressed_tensors/quantization/utils/helpers.py
@@ -92,7 +92,7 @@ def calculate_qparams(
92
93
if global_scale is not None:
94
# Conditionally scale the generated local scale by a global_scale
95
- scales = torch.clamp(
+ scales = global_scale * torch.clamp(
96
scales,
97
max=torch.finfo(quantization_args.scale_dtype).max,
98
min=torch.finfo(quantization_args.scale_dtype).min,
0 commit comments