
Commit 1137f39

add ruff lint rule to remove unused imports via ruff (#969)
remove unused imports via ruff
1 parent 20f3796 commit 1137f39

11 files changed: +68 −56 lines
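The point of the commit is mechanical cleanup: ruff's Pyflakes rule F401 flags imports that are never referenced, and `ruff check --fix` deletes them. A minimal sketch of that behavior follows; it is illustrative only and not a file from this commit.

# Hypothetical module, not part of torchao: `io` and `Tuple` are never used,
# so ruff reports F401 (unused-import) for both.
import io
from typing import Tuple

import torch


def double(x: torch.Tensor) -> torch.Tensor:
    return x * 2

# Running `ruff check --fix` with the F rules enabled removes the two unused
# imports and keeps only `import torch`, which is actually referenced.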

.github/workflows/ruff_linter.yml
Lines changed: 9 additions & 4 deletions

@@ -25,13 +25,18 @@ jobs:
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
-          pip install ruff
+          pip install ruff==0.6.8
       - name: Analyzing the code with ruff
         run: |
           ruff check .
-      - name: Check all Python files for syntax errors (E999) and undefined vars (F821)
+      - name: Check *all* Python files for F821, F823, and W191
         run: |
-          ruff check --isolated --select E999,F821
-      - name: Check well formatted code
+          # --isolated is used to skip the allowlist at all so this applies to all files
+          # please be careful when using this large changes means everyone needs to rebase
+          ruff check --isolated --select F821,F823,W191
+      - name: Check the allow-listed files for F,I
+        run: |
+          ruff check --select F,I
+      - name: Check the allow-listed files for well formatted code
         run: |
           ruff format --check
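The workflow now runs two tiers of checks: an `--isolated` pass that ignores the repository allowlist and checks every file for F821 (undefined name), F823 (local variable referenced before assignment), and W191 (tab indentation), and a second pass over the allow-listed files for the full F (Pyflakes) and I (import sorting) rule sets plus `ruff format --check`. A rough sketch of code that would trip the strict repo-wide rules; these functions are hypothetical and deliberately broken to show what each rule flags, not anything from torchao.

# F821: `spam` is never defined anywhere, so ruff reports an undefined name.
def uses_undefined_name():
    return spam


# F823: the assignment inside `inner` makes `x` local to `inner`, so the
# earlier `print(x)` references a local variable before it is assigned.
def outer():
    x = 1

    def inner():
        print(x)
        x = 2
        return x

    return inner


# W191 fires on any line whose indentation contains tab characters
# (not reproduced here, since tabs rarely survive copy/paste).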

dev-requirements.txt
Lines changed: 1 addition & 1 deletion

@@ -19,5 +19,5 @@ tabulate # QOL for printing tables to stdout
 ninja
 
 # Linting
-ruff
+ruff==0.6.8
 pre-commit

test/dtypes/test_affine_quantized_float.py
Lines changed: 17 additions & 16 deletions

@@ -1,35 +1,36 @@
+import pytest
+
 from torchao.utils import (
     TORCH_VERSION_AT_LEAST_2_5,
 )
-import pytest
 
 if not TORCH_VERSION_AT_LEAST_2_5:
     pytest.skip("Unsupported PyTorch version", allow_module_level=True)
 
+import copy
+import io
+import random
+import unittest
+from contextlib import nullcontext
+from functools import partial
+from typing import Tuple
+
+import pytest
+import torch
 from torch._inductor.test_case import TestCase as InductorTestCase
 from torch.testing._internal import common_utils
 
+from torchao.float8.float8_utils import compute_error
 from torchao.quantization import (
-    quantize_,
-    float8_weight_only,
     float8_dynamic_activation_float8_weight,
+    float8_weight_only,
+    quantize_,
 )
+from torchao.quantization.observer import PerRow, PerTensor
 from torchao.quantization.quant_api import (
     float8_static_activation_float8_weight,
 )
-from torchao.quantization.quant_primitives import choose_qparams_affine, MappingType
-from torchao.quantization.observer import PerTensor, PerRow
-from torchao.float8.float8_utils import compute_error
-import torch
-import unittest
-import pytest
-import copy
-import random
-from functools import partial
-from typing import Tuple
-from contextlib import nullcontext
-import io
-
+from torchao.quantization.quant_primitives import MappingType, choose_qparams_affine
 
 random.seed(0)
 torch.manual_seed(0)
test/dtypes/test_nf4.py
Lines changed: 11 additions & 10 deletions

@@ -1,14 +1,18 @@
 import copy
+import io
 import logging
-import unittest
-from packaging import version
 import math
+import unittest
+from collections import OrderedDict
+from typing import Tuple, Union
+
 import pytest
 import torch
+import torch.nn.functional as F
 from torch import nn
 from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
-    apply_activation_checkpointing,
     CheckpointWrapper,
+    apply_activation_checkpointing,
 )
 from torch.distributed.fsdp.wrap import ModuleWrapPolicy
 from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
@@ -19,18 +23,15 @@
     parametrize,
     run_tests,
 )
+
+import torchao
+from packaging import version
 from torchao.dtypes.nf4tensor import (
+    _INNER_TENSOR_NAMES_FOR_SHARDING,
     NF4Tensor,
     linear_nf4,
     to_nf4,
-    _INNER_TENSOR_NAMES_FOR_SHARDING,
 )
-import torch.nn.functional as F
-import io
-from collections import OrderedDict
-import torchao
-from typing import Tuple, Union
-
 
 bnb_available = False
 
test/quantization/test_observer.py
Lines changed: 11 additions & 9 deletions

@@ -1,23 +1,25 @@
 import re
+import unittest
+
 import torch
 import torch.nn as nn
+
+# NOTE: we can copy paste these here if we decide to deprecate them in torch.ao
+from torch.ao.quantization.observer import MinMaxObserver, PerChannelMinMaxObserver
+from torch.testing._internal import common_utils
 from torch.testing._internal.common_utils import TestCase
+
 from torchao.quantization.observer import (
     AffineQuantizedMinMaxObserver,
-    PerTensor,
     PerAxis,
-)
-from torchao.quantization.quant_primitives import (
-    MappingType,
+    PerTensor,
 )
 from torchao.quantization.quant_api import (
     insert_observers_,
 )
-from torch.testing._internal import common_utils
-import unittest
-
-# NOTE: we can copy paste these here if we decide to deprecate them in torch.ao
-from torch.ao.quantization.observer import MinMaxObserver, PerChannelMinMaxObserver
+from torchao.quantization.quant_primitives import (
+    MappingType,
+)
 
 
 class TestQuantFlow(TestCase):

torchao/dtypes/nf4tensor.py
Lines changed: 3 additions & 4 deletions

@@ -1,15 +1,14 @@
 import functools
-from dataclasses import dataclass, replace
 import math
-from typing import Dict, Tuple, Any, Optional, Union
 import sys
+from dataclasses import dataclass, replace
 from enum import Enum, auto
+from typing import Any, Dict, Optional, Tuple, Union
 
 import torch
 import torch.nn.functional as F
-from torch.distributed.device_mesh import DeviceMesh
 from torch._prims_common import make_contiguous_strides_for
-
+from torch.distributed.device_mesh import DeviceMesh
 
 aten = torch.ops.aten
 
torchao/float8/float8_tensor.py
Lines changed: 3 additions & 3 deletions

@@ -4,16 +4,16 @@
 # This source code is licensed under the BSD 3-Clause license found in the
 # LICENSE file in the root directory of this source tree.
 import enum
-from typing import Dict, Optional, NamedTuple
+from typing import Dict, NamedTuple, Optional
 
 import torch
-
 import torch.distributed._functional_collectives as funcol
+from torch.distributed._tensor import DTensor
+
 from torchao.float8.float8_utils import (
     e4m3_dtype,
     to_fp8_saturated,
 )
-from torch.distributed._tensor import DTensor
 
 aten = torch.ops.aten
 

torchao/float8/float8_utils.py
Lines changed: 2 additions & 2 deletions

@@ -6,11 +6,11 @@
 
 from typing import Iterable, Literal, Tuple, Union
 
-import torchao.float8.config as config
-
 import torch
 import torch.distributed as dist
 
+import torchao.float8.config as config
+
 # Helpful visualizer for debugging (only supports fp32):
 # https://www.h-schmidt.net/FloatConverter/IEEE754.html
 

torchao/float8/inference.py
Lines changed: 2 additions & 1 deletion

@@ -7,9 +7,10 @@
 Defines an nn module designed to be used during inference
 """
 
-from typing import Optional, Tuple, NamedTuple
+from typing import NamedTuple, Optional, Tuple
 
 import torch
+
 from torchao.float8.float8_utils import is_row_major, pad_tensor_for_matmul
 
 Tensor = torch.Tensor

torchao/quantization/linear_activation_weight_observer.py
Lines changed: 5 additions & 4 deletions

@@ -1,13 +1,14 @@
+from typing import Callable, Dict, Optional
+
 import torch
-from typing import Callable, Optional, Dict
 from torch.utils._python_dispatch import return_and_correct_aliasing
+
+from torchao.quantization.observer import AffineQuantizedObserverBase
 from torchao.utils import (
-    TorchAOBaseTensor,
     TORCH_VERSION_AT_LEAST_2_5,
+    TorchAOBaseTensor,
 )
 
-from torchao.quantization.observer import AffineQuantizedObserverBase
-
 __all__ = [
     "LinearActivationWeightObservedTensor",
 ]
