From 5edf1c157af18e7dcd5222bb4e5bff78564d474c Mon Sep 17 00:00:00 2001 From: Shreyas Ravi Date: Wed, 4 Feb 2026 16:49:03 +0000 Subject: [PATCH] fix(quantizer): correct BlockMinifloatQuantize backward signature Remove unnecessary arguments from the backward method to match PyTorch autograd.Function requirements and fix a TypeError. --- src/chop/nn/quantizers/block_minifloat.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/chop/nn/quantizers/block_minifloat.py b/src/chop/nn/quantizers/block_minifloat.py index 34e00bbcb..f10df89b2 100644 --- a/src/chop/nn/quantizers/block_minifloat.py +++ b/src/chop/nn/quantizers/block_minifloat.py @@ -84,11 +84,6 @@ def forward( def backward( ctx, grad_output: Tensor, - width: int, - exponent_width: int, - exponent_bias_width: int, - block_size: list[int] | int = [16], - skip_first_dim: bool = False, ): return grad_output, None, None, None, None, None