Skip to content

Commit c11b3e2

Browse files
committed
Sort imports for optional third-party libraries.
These libraries aren't always installed in the virtual environment where isort runs, so isort cannot detect them automatically. Declaring them in the `known_third_party` list in setup.cfg keeps these third-party imports from being mis-grouped with local imports.
1 parent 2a34d5b commit c11b3e2

10 files changed

+28
-16
lines changed

.circleci/config.yml

+1-1
Original file line numberDiff line numberDiff line change
@@ -95,7 +95,7 @@ jobs:
9595
steps:
9696
- checkout
9797
- run: sudo pip install --editable .
98-
- run: sudo pip install torch tensorflow tensorboardX scikit-learn
98+
- run: sudo pip install torch tensorflow
9999
- run: sudo pip install black git+git://github.com/timothycrosley/isort.git@e63ae06ec7d70b06df9e528357650281a3d3ec22#egg=isort flake8
100100
- run: black --check --line-length 119 examples templates transformers utils
101101
- run: isort --check-only --recursive examples templates transformers utils

examples/distillation/distiller.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@
1919
import os
2020
import time
2121

22+
import psutil
2223
import torch
2324
import torch.nn as nn
2425
import torch.nn.functional as F
@@ -27,7 +28,6 @@
2728
from torch.utils.data.distributed import DistributedSampler
2829
from tqdm import tqdm
2930

30-
import psutil
3131
from grouped_batch_sampler import GroupedBatchSampler, create_lengths_groups
3232
from lm_seqs_dataset import LmSeqsDataset
3333
from transformers import get_linear_schedule_with_warmup

examples/distillation/utils.py

+1-2
Original file line numberDiff line numberDiff line change
@@ -20,11 +20,10 @@
2020
import os
2121
import socket
2222

23+
import git
2324
import numpy as np
2425
import torch
2526

26-
import git
27-
2827

2928
logging.basicConfig(
3029
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",

examples/mm-imdb/utils_mmimdb.py

+1-2
Original file line numberDiff line numberDiff line change
@@ -20,11 +20,10 @@
2020

2121
import torch
2222
import torch.nn as nn
23-
from torch.utils.data import Dataset
24-
2523
import torchvision
2624
import torchvision.transforms as transforms
2725
from PIL import Image
26+
from torch.utils.data import Dataset
2827

2928

3029
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}

examples/pplm/run_pplm_discrim_train.py

+3-3
Original file line numberDiff line numberDiff line change
@@ -26,12 +26,12 @@
2626
import torch.nn.functional as F
2727
import torch.optim as optim
2828
import torch.utils.data as data
29-
from tqdm import tqdm, trange
30-
3129
from nltk.tokenize.treebank import TreebankWordDetokenizer
32-
from pplm_classification_head import ClassificationHead
3330
from torchtext import data as torchtext_data
3431
from torchtext import datasets
32+
from tqdm import tqdm, trange
33+
34+
from pplm_classification_head import ClassificationHead
3535
from transformers import GPT2LMHeadModel, GPT2Tokenizer
3636

3737

examples/run_ner.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -25,13 +25,13 @@
2525

2626
import numpy as np
2727
import torch
28+
from seqeval.metrics import f1_score, precision_score, recall_score
2829
from tensorboardX import SummaryWriter
2930
from torch.nn import CrossEntropyLoss
3031
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
3132
from torch.utils.data.distributed import DistributedSampler
3233
from tqdm import tqdm, trange
3334

34-
from seqeval.metrics import f1_score, precision_score, recall_score
3535
from transformers import (
3636
WEIGHTS_NAME,
3737
AdamW,

examples/run_tf_glue.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,8 @@
11
import os
22

33
import tensorflow as tf
4-
54
import tensorflow_datasets
5+
66
from transformers import (
77
BertConfig,
88
BertForSequenceClassification,

examples/run_tf_ner.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -9,9 +9,9 @@
99
import numpy as np
1010
import tensorflow as tf
1111
from absl import app, flags, logging
12-
1312
from fastprogress import master_bar, progress_bar
1413
from seqeval import metrics
14+
1515
from transformers import (
1616
TF2_WEIGHTS_NAME,
1717
BertConfig,

setup.cfg

+15-1
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,21 @@ ensure_newline_before_comments = True
33
force_grid_wrap = 0
44
include_trailing_comma = True
55
known_first_party = transformers
6-
known_third_party = packaging
6+
known_third_party =
7+
fairseq
8+
fastprogress
9+
git
10+
nltk
11+
packaging
12+
PIL
13+
psutil
14+
seqeval
15+
sklearn
16+
tensorboardX
17+
tensorflow_datasets
18+
torchtext
19+
torchvision
20+
721
line_length = 119
822
lines_after_imports = 2
923
multi_line_output = 3

transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py

+3-3
Original file line numberDiff line numberDiff line change
@@ -20,12 +20,12 @@
2020
import logging
2121
import pathlib
2222

23-
import torch
24-
from packaging import version
25-
2623
import fairseq
24+
import torch
2725
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
2826
from fairseq.modules import TransformerSentenceEncoderLayer
27+
from packaging import version
28+
2929
from transformers.modeling_bert import (
3030
BertConfig,
3131
BertIntermediate,

0 commit comments

Comments (0)