
Commit 6ca9c76

pmeier and NicolasHug authored
Upgrade usort to 1.0.2 and black to 22.3.0 (#5106)
* upgrade usort to
* Also update black
* Actually use 1.0.2
* Apply pre-commit

Co-authored-by: Nicolas Hug <contact@nicolas-hug.com>
1 parent 9293be7 commit 6ca9c76

File tree: 191 files changed, +571 -763 lines


.pre-commit-config.yaml (+2 -2)

@@ -15,8 +15,8 @@ repos:
     hooks:
       - id: ufmt
         additional_dependencies:
-          - black == 21.9b0
-          - usort == 0.6.4
+          - black == 22.3.0
+          - usort == 1.0.2

   - repo: https://gitlab.com/pycqa/flake8
     rev: 3.9.2
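
The pins matter because both tools changed their output between releases. A minimal, hypothetical sanity check (not part of this commit) that the locally installed formatters match the versions pinned above, using only the standard library:

# Hypothetical check: compare installed formatter versions against the pins
# in .pre-commit-config.yaml. importlib.metadata is stdlib on Python 3.8+.
from importlib.metadata import PackageNotFoundError, version

for package, pinned in {"black": "22.3.0", "usort": "1.0.2"}.items():
    try:
        installed = version(package)
    except PackageNotFoundError:
        print(f"{package} is not installed")
        continue
    status = "OK" if installed == pinned else f"expected {pinned}"
    print(f"{package} {installed}: {status}")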

hubconf.py (+27 -33)

@@ -3,8 +3,8 @@

 from torchvision.models import get_weight
 from torchvision.models.alexnet import alexnet
-from torchvision.models.convnext import convnext_tiny, convnext_small, convnext_base, convnext_large
-from torchvision.models.densenet import densenet121, densenet169, densenet201, densenet161
+from torchvision.models.convnext import convnext_base, convnext_large, convnext_small, convnext_tiny
+from torchvision.models.densenet import densenet121, densenet161, densenet169, densenet201
 from torchvision.models.efficientnet import (
     efficientnet_b0,
     efficientnet_b1,
@@ -14,9 +14,9 @@
     efficientnet_b5,
     efficientnet_b6,
     efficientnet_b7,
-    efficientnet_v2_s,
-    efficientnet_v2_m,
     efficientnet_v2_l,
+    efficientnet_v2_m,
+    efficientnet_v2_s,
 )
 from torchvision.models.googlenet import googlenet
 from torchvision.models.inception import inception_v3
@@ -25,40 +25,40 @@
 from torchvision.models.mobilenetv3 import mobilenet_v3_large, mobilenet_v3_small
 from torchvision.models.optical_flow import raft_large, raft_small
 from torchvision.models.regnet import (
-    regnet_y_400mf,
-    regnet_y_800mf,
-    regnet_y_1_6gf,
-    regnet_y_3_2gf,
-    regnet_y_8gf,
-    regnet_y_16gf,
-    regnet_y_32gf,
-    regnet_y_128gf,
-    regnet_x_400mf,
-    regnet_x_800mf,
+    regnet_x_16gf,
     regnet_x_1_6gf,
+    regnet_x_32gf,
     regnet_x_3_2gf,
+    regnet_x_400mf,
+    regnet_x_800mf,
     regnet_x_8gf,
-    regnet_x_16gf,
-    regnet_x_32gf,
+    regnet_y_128gf,
+    regnet_y_16gf,
+    regnet_y_1_6gf,
+    regnet_y_32gf,
+    regnet_y_3_2gf,
+    regnet_y_400mf,
+    regnet_y_800mf,
+    regnet_y_8gf,
 )
 from torchvision.models.resnet import (
+    resnet101,
+    resnet152,
     resnet18,
     resnet34,
     resnet50,
-    resnet101,
-    resnet152,
-    resnext50_32x4d,
     resnext101_32x8d,
     resnext101_64x4d,
-    wide_resnet50_2,
+    resnext50_32x4d,
     wide_resnet101_2,
+    wide_resnet50_2,
 )
 from torchvision.models.segmentation import (
-    fcn_resnet50,
-    fcn_resnet101,
-    deeplabv3_resnet50,
-    deeplabv3_resnet101,
     deeplabv3_mobilenet_v3_large,
+    deeplabv3_resnet101,
+    deeplabv3_resnet50,
+    fcn_resnet101,
+    fcn_resnet50,
     lraspp_mobilenet_v3_large,
 )
 from torchvision.models.shufflenetv2 import (
@@ -68,12 +68,6 @@
     shufflenet_v2_x2_0,
 )
 from torchvision.models.squeezenet import squeezenet1_0, squeezenet1_1
-from torchvision.models.swin_transformer import swin_t, swin_s, swin_b
-from torchvision.models.vgg import vgg11, vgg13, vgg16, vgg19, vgg11_bn, vgg13_bn, vgg16_bn, vgg19_bn
-from torchvision.models.vision_transformer import (
-    vit_b_16,
-    vit_b_32,
-    vit_l_16,
-    vit_l_32,
-    vit_h_14,
-)
+from torchvision.models.swin_transformer import swin_b, swin_s, swin_t
+from torchvision.models.vgg import vgg11, vgg11_bn, vgg13, vgg13_bn, vgg16, vgg16_bn, vgg19, vgg19_bn
+from torchvision.models.vision_transformer import vit_b_16, vit_b_32, vit_h_14, vit_l_16, vit_l_32
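
Every hunk in this file is usort reordering names within a single import statement: lexicographic and case-insensitive, which is why regnet_x_16gf lands before regnet_x_1_6gf ("6" sorts before "_"). A rough stand-in for that comparator (usort's real one also handles aliases and dotted modules) is str.casefold:

# Approximate usort's within-statement ordering with a case-insensitive sort.
names = ["regnet_y_400mf", "regnet_x_800mf", "regnet_x_16gf", "regnet_x_1_6gf"]
print(sorted(names, key=str.casefold))
# ['regnet_x_16gf', 'regnet_x_1_6gf', 'regnet_x_800mf', 'regnet_y_400mf']

# Case-insensitivity interleaves UPPER_CASE and lower_case names, as in the
# test/test_image.py hunk further down.
print(sorted(["write_png", "ImageReadMode", "read_file", "_read_png_16"], key=str.casefold))
# ['_read_png_16', 'ImageReadMode', 'read_file', 'write_png']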

references/classification/train_quantization.py (+1 -1)

@@ -9,7 +9,7 @@
 import torchvision
 import utils
 from torch import nn
-from train import train_one_epoch, evaluate, load_data
+from train import evaluate, load_data, train_one_epoch


 def main(args):

references/detection/group_by_aspect_ratio.py (+1 -1)

@@ -2,7 +2,7 @@
 import copy
 import math
 from collections import defaultdict
-from itertools import repeat, chain
+from itertools import chain, repeat

 import numpy as np
 import torch

references/detection/train.py (+2 -2)

@@ -29,8 +29,8 @@
 import torchvision.models.detection.mask_rcnn
 import utils
 from coco_utils import get_coco, get_coco_kp
-from engine import train_one_epoch, evaluate
-from group_by_aspect_ratio import GroupedBatchSampler, create_aspect_ratio_groups
+from engine import evaluate, train_one_epoch
+from group_by_aspect_ratio import create_aspect_ratio_groups, GroupedBatchSampler
 from torchvision.transforms import InterpolationMode
 from transforms import SimpleCopyPaste

references/detection/transforms.py (+2 -3)

@@ -1,11 +1,10 @@
-from typing import List, Tuple, Dict, Optional, Union
+from typing import Dict, List, Optional, Tuple, Union

 import torch
 import torchvision
 from torch import nn, Tensor
 from torchvision import ops
-from torchvision.transforms import functional as F
-from torchvision.transforms import transforms as T, InterpolationMode
+from torchvision.transforms import functional as F, InterpolationMode, transforms as T


 def _flip_coco_person_keypoints(kps, width):
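
This hunk shows two more usort behaviors: consecutive from-imports of the same module are merged into one statement, and aliased names sort by the imported name, not the alias (functional < InterpolationMode < transforms, case-insensitively, even though the aliases are F and T). A small sketch of the alias rule, again approximating usort's comparator with str.casefold:

# Approximate usort's handling of "import x as y": sort by the imported
# name (before "as"), not by the alias.
imports = [
    ("transforms", "T"),          # transforms as T
    ("functional", "F"),          # functional as F
    ("InterpolationMode", None),  # no alias
]
ordered = sorted(imports, key=lambda item: item[0].casefold())
print([name for name, _alias in ordered])
# ['functional', 'InterpolationMode', 'transforms']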

references/optical_flow/train.py (+2 -2)

@@ -6,8 +6,8 @@
 import torch
 import torchvision.models.optical_flow
 import utils
-from presets import OpticalFlowPresetTrain, OpticalFlowPresetEval
-from torchvision.datasets import KittiFlow, FlyingChairs, FlyingThings3D, Sintel, HD1K
+from presets import OpticalFlowPresetEval, OpticalFlowPresetTrain
+from torchvision.datasets import FlyingChairs, FlyingThings3D, HD1K, KittiFlow, Sintel


 def get_train_dataset(stage, dataset_root):

references/optical_flow/utils.py (+3 -4)

@@ -1,8 +1,7 @@
 import datetime
 import os
 import time
-from collections import defaultdict
-from collections import deque
+from collections import defaultdict, deque

 import torch
 import torch.distributed as dist
@@ -158,7 +157,7 @@ def log_every(self, iterable, print_freq=5, header=None):
 def compute_metrics(flow_pred, flow_gt, valid_flow_mask=None):

     epe = ((flow_pred - flow_gt) ** 2).sum(dim=1).sqrt()
-    flow_norm = (flow_gt ** 2).sum(dim=1).sqrt()
+    flow_norm = (flow_gt**2).sum(dim=1).sqrt()

     if valid_flow_mask is not None:
         epe = epe[valid_flow_mask]
@@ -183,7 +182,7 @@ def sequence_loss(flow_preds, flow_gt, valid_flow_mask, gamma=0.8, max_flow=400)
         raise ValueError(f"Gamma should be < 1, got {gamma}.")

     # exlude invalid pixels and extremely large diplacements
-    flow_norm = torch.sum(flow_gt ** 2, dim=1).sqrt()
+    flow_norm = torch.sum(flow_gt**2, dim=1).sqrt()
     valid_flow_mask = valid_flow_mask & (flow_norm < max_flow)

     valid_flow_mask = valid_flow_mask[:, None, :, :]
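
The ** changes in this file come from black 22.x rather than usort: the new stable style hugs the power operator when both operands are simple (names, numeric literals, attribute chains) and keeps the spaces otherwise, which is why (flow_pred - flow_gt) ** 2 above is left alone. A sketch of the rule through black's Python API, assuming black == 22.3.0 is installed:

# Demonstrate black 22.x power-operator hugging (assumes black==22.3.0).
import black

for src in (
    "flow_norm = (flow_gt ** 2).sum(dim=1)\n",          # simple operands: hugged
    "epe = ((flow_pred - flow_gt) ** 2).sum(dim=1)\n",  # complex operand: spaced
):
    print(black.format_str(src, mode=black.Mode()), end="")
# flow_norm = (flow_gt**2).sum(dim=1)
# epe = ((flow_pred - flow_gt) ** 2).sum(dim=1)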

references/segmentation/utils.py (+1 -1)

@@ -75,7 +75,7 @@ def update(self, a, b):
         with torch.inference_mode():
             k = (a >= 0) & (a < n)
             inds = n * a[k].to(torch.int64) + b[k]
-            self.mat += torch.bincount(inds, minlength=n ** 2).reshape(n, n)
+            self.mat += torch.bincount(inds, minlength=n**2).reshape(n, n)

     def reset(self):
         self.mat.zero_()

setup.py (+3 -3)

@@ -7,9 +7,9 @@
 import sys

 import torch
-from pkg_resources import parse_version, get_distribution, DistributionNotFound
-from setuptools import setup, find_packages
-from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
+from pkg_resources import DistributionNotFound, get_distribution, parse_version
+from setuptools import find_packages, setup
+from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDA_HOME, CUDAExtension


 def read(*names, **kwargs):

test/builtin_dataset_mocks.py (+2 -2)

@@ -14,12 +14,12 @@
 import unittest.mock
 import warnings
 import xml.etree.ElementTree as ET
-from collections import defaultdict, Counter
+from collections import Counter, defaultdict

 import numpy as np
 import pytest
 import torch
-from datasets_utils import make_zip, make_tar, create_image_folder, create_image_file, combinations_grid
+from datasets_utils import combinations_grid, create_image_file, create_image_folder, make_tar, make_zip
 from torch.nn.functional import one_hot
 from torch.testing import make_tensor as _make_tensor
 from torchvision.prototype import datasets

test/conftest.py (+1 -1)

@@ -3,7 +3,7 @@
 import numpy as np
 import pytest
 import torch
-from common_utils import IN_CIRCLE_CI, CIRCLECI_GPU_NO_CUDA_MSG, IN_FBCODE, IN_RE_WORKER, CUDA_NOT_AVAILABLE_MSG
+from common_utils import CIRCLECI_GPU_NO_CUDA_MSG, CUDA_NOT_AVAILABLE_MSG, IN_CIRCLE_CI, IN_FBCODE, IN_RE_WORKER


 def pytest_configure(config):

test/datasets_utils.py (+1 -1)

@@ -22,7 +22,7 @@
 import torch
 import torchvision.datasets
 import torchvision.io
-from common_utils import get_tmp_dir, disable_console_output
+from common_utils import disable_console_output, get_tmp_dir


 __all__ = [

test/test_datasets_download.py (+3 -3)

@@ -9,15 +9,15 @@
 from os import path
 from urllib.error import HTTPError, URLError
 from urllib.parse import urlparse
-from urllib.request import urlopen, Request
+from urllib.request import Request, urlopen

 import pytest
 from torchvision import datasets
 from torchvision.datasets.utils import (
-    download_url,
+    _get_redirect_url,
     check_integrity,
     download_file_from_google_drive,
-    _get_redirect_url,
+    download_url,
     USER_AGENT,
 )

test/test_datasets_samplers.py (+2 -6)

@@ -1,12 +1,8 @@
 import pytest
 import torch
-from common_utils import get_list_of_videos, assert_equal
+from common_utils import assert_equal, get_list_of_videos
 from torchvision import io
-from torchvision.datasets.samplers import (
-    DistributedSampler,
-    RandomClipSampler,
-    UniformClipSampler,
-)
+from torchvision.datasets.samplers import DistributedSampler, RandomClipSampler, UniformClipSampler
 from torchvision.datasets.video_utils import VideoClips

test/test_datasets_video_utils.py (+2 -2)

@@ -1,8 +1,8 @@
 import pytest
 import torch
-from common_utils import get_list_of_videos, assert_equal
+from common_utils import assert_equal, get_list_of_videos
 from torchvision import io
-from torchvision.datasets.video_utils import VideoClips, unfold
+from torchvision.datasets.video_utils import unfold, VideoClips


 class TestVideo:

test/test_extended_models.py (+1 -1)

@@ -5,7 +5,7 @@
 import test_models as TM
 import torch
 from torchvision import models
-from torchvision.models._api import WeightsEnum, Weights
+from torchvision.models._api import Weights, WeightsEnum
 from torchvision.models._utils import handle_legacy_interface

test/test_functional_tensor.py (+4 -4)

@@ -14,14 +14,14 @@
 import torchvision.transforms.functional_pil as F_pil
 import torchvision.transforms.functional_tensor as F_t
 from common_utils import (
-    cpu_and_gpu,
-    needs_cuda,
+    _assert_approx_equal_tensor_to_pil,
+    _assert_equal_tensor_to_pil,
     _create_data,
     _create_data_batch,
-    _assert_equal_tensor_to_pil,
-    _assert_approx_equal_tensor_to_pil,
     _test_fn_on_batch,
     assert_equal,
+    cpu_and_gpu,
+    needs_cuda,
 )
 from torchvision.transforms import InterpolationMode

test/test_image.py (+10 -10)

@@ -8,21 +8,21 @@
 import pytest
 import torch
 import torchvision.transforms.functional as F
-from common_utils import needs_cuda, assert_equal
-from PIL import Image, __version__ as PILLOW_VERSION
+from common_utils import assert_equal, needs_cuda
+from PIL import __version__ as PILLOW_VERSION, Image
 from torchvision.io.image import (
-    decode_png,
+    _read_png_16,
+    decode_image,
     decode_jpeg,
+    decode_png,
     encode_jpeg,
-    write_jpeg,
-    decode_image,
-    read_file,
     encode_png,
-    write_png,
-    write_file,
     ImageReadMode,
+    read_file,
     read_image,
-    _read_png_16,
+    write_file,
+    write_jpeg,
+    write_png,
 )

 IMAGE_ROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "assets")
@@ -168,7 +168,7 @@ def test_decode_png(img_path, pil_mode, mode):
         img_lpng = _read_png_16(img_path, mode=mode)
         assert img_lpng.dtype == torch.int32
         # PIL converts 16 bits pngs in uint8
-        img_lpng = torch.round(img_lpng / (2 ** 16 - 1) * 255).to(torch.uint8)
+        img_lpng = torch.round(img_lpng / (2**16 - 1) * 255).to(torch.uint8)
     else:
         data = read_file(img_path)
         img_lpng = decode_image(data, mode=mode)

test/test_models.py (+1 -1)

@@ -14,7 +14,7 @@
 import torch.fx
 import torch.nn as nn
 from _utils_internal import get_relative_path
-from common_utils import map_nested_tensor_object, freeze_rng_state, set_rng_seed, cpu_and_gpu, needs_cuda
+from common_utils import cpu_and_gpu, freeze_rng_state, map_nested_tensor_object, needs_cuda, set_rng_seed
 from torchvision import models

 ACCEPT = os.getenv("EXPECTTEST_ACCEPT", "0") == "1"

test/test_models_detection_negative_samples.py (+2 -2)

@@ -4,7 +4,7 @@
 from common_utils import assert_equal
 from torchvision.models.detection.faster_rcnn import FastRCNNPredictor, TwoMLPHead
 from torchvision.models.detection.roi_heads import RoIHeads
-from torchvision.models.detection.rpn import AnchorGenerator, RPNHead, RegionProposalNetwork
+from torchvision.models.detection.rpn import AnchorGenerator, RegionProposalNetwork, RPNHead
 from torchvision.ops import MultiScaleRoIAlign
@@ -60,7 +60,7 @@ def test_assign_targets_to_proposals(self):

         resolution = box_roi_pool.output_size[0]
         representation_size = 1024
-        box_head = TwoMLPHead(4 * resolution ** 2, representation_size)
+        box_head = TwoMLPHead(4 * resolution**2, representation_size)

         representation_size = 1024
         box_predictor = FastRCNNPredictor(representation_size, 2)
