From 7fece44ca475d67c8dca9dcf37dd0e44cf2372c8 Mon Sep 17 00:00:00 2001
From: HDCharles <39544797+HDCharles@users.noreply.github.com>
Date: Thu, 22 May 2025 23:11:24 -0400
Subject: [PATCH] Revert "Patch the _is_conv_node function"

This reverts commit 3884e29cc78de7688f44b317c457728fab1bf9f1.
---
 test/quantization/pt2e/test_quantize_pt2e.py | 20 --------------------
 torchao/quantization/pt2e/utils.py           |  3 ---
 2 files changed, 23 deletions(-)

diff --git a/test/quantization/pt2e/test_quantize_pt2e.py b/test/quantization/pt2e/test_quantize_pt2e.py
index a96f397925..75701c55ca 100644
--- a/test/quantization/pt2e/test_quantize_pt2e.py
+++ b/test/quantization/pt2e/test_quantize_pt2e.py
@@ -2478,26 +2478,6 @@ def forward(self, x):
             node_list,
         )
-        example_inputs = (torch.randn(1, 3, 5, 5),)
-        node_occurrence = {
-            # two for input of the first conv, one for output for the first conv
-            torch.ops.quantized_decomposed.quantize_per_tensor.default: 2,
-            torch.ops.quantized_decomposed.dequantize_per_tensor.default: 3,
-        }
-        node_list = [
-            torch.ops.quantized_decomposed.dequantize_per_tensor.default,
-            torch.ops.quantized_decomposed.dequantize_per_tensor.default,
-            torch.ops.aten.conv2d.padding,
-            torch.ops.aten.relu.default,
-            torch.ops.quantized_decomposed.quantize_per_tensor.default,
-        ]
-        self._test_quantizer(
-            TestHelperModules.ConvWithBNRelu(dim=2, relu=True, bn=True, padding="same"),
-            example_inputs,
-            BackendAQuantizer(),
-            node_occurrence,
-            node_list,
-        )
 
     def test_conv_transpose3d_bn_relu(self):
         class BackendAQuantizer(Quantizer):
             def annotate(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule:
diff --git a/torchao/quantization/pt2e/utils.py b/torchao/quantization/pt2e/utils.py
index dc5f802fb8..ad5c0ae179 100644
--- a/torchao/quantization/pt2e/utils.py
+++ b/torchao/quantization/pt2e/utils.py
@@ -625,11 +625,8 @@ def _is_conv_node(n: Node):
     """
     return n.op == "call_function" and n.target in [
         torch.ops.aten.conv1d.default,
-        torch.ops.aten.conv1d.padding,
         torch.ops.aten.conv2d.default,
-        torch.ops.aten.conv2d.padding,
         torch.ops.aten.conv3d.default,
-        torch.ops.aten.conv3d.padding,
     ]