From 107fb9acfbd6e74247542c81e88920b51f96787f Mon Sep 17 00:00:00 2001
From: Kaathi <karthigeyan@honeybadgerlabs.in>
Date: Thu, 24 Oct 2019 23:05:55 +0530
Subject: [PATCH 1/4] Clean up noqa E241 #29207

---
 pandas/tests/dtypes/test_inference.py | 80 +++++++++++++--------------
 pandas/tests/test_strings.py          | 74 ++++++++++++-------------
 2 files changed, 77 insertions(+), 77 deletions(-)

diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py
index 60afd768195d9..aeec12b9ad14e 100644
--- a/pandas/tests/dtypes/test_inference.py
+++ b/pandas/tests/dtypes/test_inference.py
@@ -62,46 +62,46 @@ def coerce(request):
 # collect all objects to be tested for list-like-ness; use tuples of objects,
 # whether they are list-like or not (special casing for sets), and their ID
 ll_params = [
-    ([1], True, "list"),  # noqa: E241
-    ([], True, "list-empty"),  # noqa: E241
-    ((1,), True, "tuple"),  # noqa: E241
-    (tuple(), True, "tuple-empty"),  # noqa: E241
-    ({"a": 1}, True, "dict"),  # noqa: E241
-    (dict(), True, "dict-empty"),  # noqa: E241
-    ({"a", 1}, "set", "set"),  # noqa: E241
-    (set(), "set", "set-empty"),  # noqa: E241
-    (frozenset({"a", 1}), "set", "frozenset"),  # noqa: E241
-    (frozenset(), "set", "frozenset-empty"),  # noqa: E241
-    (iter([1, 2]), True, "iterator"),  # noqa: E241
-    (iter([]), True, "iterator-empty"),  # noqa: E241
-    ((x for x in [1, 2]), True, "generator"),  # noqa: E241
-    ((_ for _ in []), True, "generator-empty"),  # noqa: E241
-    (Series([1]), True, "Series"),  # noqa: E241
-    (Series([]), True, "Series-empty"),  # noqa: E241
-    (Series(["a"]).str, True, "StringMethods"),  # noqa: E241
-    (Series([], dtype="O").str, True, "StringMethods-empty"),  # noqa: E241
-    (Index([1]), True, "Index"),  # noqa: E241
-    (Index([]), True, "Index-empty"),  # noqa: E241
-    (DataFrame([[1]]), True, "DataFrame"),  # noqa: E241
-    (DataFrame(), True, "DataFrame-empty"),  # noqa: E241
-    (np.ndarray((2,) * 1), True, "ndarray-1d"),  # noqa: E241
-    (np.array([]), True, "ndarray-1d-empty"),  # noqa: E241
-    (np.ndarray((2,) * 2), True, "ndarray-2d"),  # noqa: E241
-    (np.array([[]]), True, "ndarray-2d-empty"),  # noqa: E241
-    (np.ndarray((2,) * 3), True, "ndarray-3d"),  # noqa: E241
-    (np.array([[[]]]), True, "ndarray-3d-empty"),  # noqa: E241
-    (np.ndarray((2,) * 4), True, "ndarray-4d"),  # noqa: E241
-    (np.array([[[[]]]]), True, "ndarray-4d-empty"),  # noqa: E241
-    (np.array(2), False, "ndarray-0d"),  # noqa: E241
-    (1, False, "int"),  # noqa: E241
-    (b"123", False, "bytes"),  # noqa: E241
-    (b"", False, "bytes-empty"),  # noqa: E241
-    ("123", False, "string"),  # noqa: E241
-    ("", False, "string-empty"),  # noqa: E241
-    (str, False, "string-type"),  # noqa: E241
-    (object(), False, "object"),  # noqa: E241
-    (np.nan, False, "NaN"),  # noqa: E241
-    (None, False, "None"),  # noqa: E241
+    ([1], True, "list"),
+    ([], True, "list-empty"),
+    ((1,), True, "tuple"),
+    (tuple(), True, "tuple-empty"),
+    ({"a": 1}, True, "dict"),
+    (dict(), True, "dict-empty"),
+    ({"a", 1}, "set", "set"),
+    (set(), "set", "set-empty"),
+    (frozenset({"a", 1}), "set", "frozenset"),
+    (frozenset(), "set", "frozenset-empty"),
+    (iter([1, 2]), True, "iterator"),
+    (iter([]), True, "iterator-empty"),
+    ((x for x in [1, 2]), True, "generator"),
+    ((_ for _ in []), True, "generator-empty"),
+    (Series([1]), True, "Series"),
+    (Series([]), True, "Series-empty"),
+    (Series(["a"]).str, True, "StringMethods"),
+    (Series([], dtype="O").str, True, "StringMethods-empty"),
+    (Index([1]), True, "Index"),
+    (Index([]), True, "Index-empty"),
+    (DataFrame([[1]]), True, "DataFrame"),
+    (DataFrame(), True, "DataFrame-empty"),
+    (np.ndarray((2,) * 1), True, "ndarray-1d"),
+    (np.array([]), True, "ndarray-1d-empty"),
+    (np.ndarray((2,) * 2), True, "ndarray-2d"),
+    (np.array([[]]), True, "ndarray-2d-empty"),
+    (np.ndarray((2,) * 3), True, "ndarray-3d"),
+    (np.array([[[]]]), True, "ndarray-3d-empty"),
+    (np.ndarray((2,) * 4), True, "ndarray-4d"),
+    (np.array([[[[]]]]), True, "ndarray-4d-empty"),
+    (np.array(2), False, "ndarray-0d"),
+    (1, False, "int"),
+    (b"123", False, "bytes"),
+    (b"", False, "bytes-empty"),
+    ("123", False, "string"),
+    ("", False, "string-empty"),
+    (str, False, "string-type"),
+    (object(), False, "object"),
+    (np.nan, False, "NaN"),
+    (None, False, "None"),
 ]
 objs, expected, ids = zip(*ll_params)
 
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index 53d74f74dc439..cfaf123045b1f 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -21,46 +21,46 @@ def assert_series_or_index_equal(left, right):
 
 
 _any_string_method = [
-    ("cat", (), {"sep": ","}),  # noqa: E241
-    ("cat", (Series(list("zyx")),), {"sep": ",", "join": "left"}),  # noqa: E241
-    ("center", (10,), {}),  # noqa: E241
-    ("contains", ("a",), {}),  # noqa: E241
-    ("count", ("a",), {}),  # noqa: E241
-    ("decode", ("UTF-8",), {}),  # noqa: E241
-    ("encode", ("UTF-8",), {}),  # noqa: E241
-    ("endswith", ("a",), {}),  # noqa: E241
-    ("extract", ("([a-z]*)",), {"expand": False}),  # noqa: E241
-    ("extract", ("([a-z]*)",), {"expand": True}),  # noqa: E241
-    ("extractall", ("([a-z]*)",), {}),  # noqa: E241
-    ("find", ("a",), {}),  # noqa: E241
-    ("findall", ("a",), {}),  # noqa: E241
-    ("get", (0,), {}),  # noqa: E241
+    ("cat", (), {"sep": ","}),
+    ("cat", (Series(list("zyx")),), {"sep": ",", "join": "left"}),
+    ("center", (10,), {}),
+    ("contains", ("a",), {}),
+    ("count", ("a",), {}),
+    ("decode", ("UTF-8",), {}),
+    ("encode", ("UTF-8",), {}),
+    ("endswith", ("a",), {}),
+    ("extract", ("([a-z]*)",), {"expand": False}),
+    ("extract", ("([a-z]*)",), {"expand": True}),
+    ("extractall", ("([a-z]*)",), {}),
+    ("find", ("a",), {}),
+    ("findall", ("a",), {}),
+    ("get", (0,), {}),
     # because "index" (and "rindex") fail intentionally
     # if the string is not found, search only for empty string
-    ("index", ("",), {}),  # noqa: E241
-    ("join", (",",), {}),  # noqa: E241
-    ("ljust", (10,), {}),  # noqa: E241
-    ("match", ("a",), {}),  # noqa: E241
-    ("normalize", ("NFC",), {}),  # noqa: E241
-    ("pad", (10,), {}),  # noqa: E241
-    ("partition", (" ",), {"expand": False}),  # noqa: E241
-    ("partition", (" ",), {"expand": True}),  # noqa: E241
-    ("repeat", (3,), {}),  # noqa: E241
-    ("replace", ("a", "z"), {}),  # noqa: E241
-    ("rfind", ("a",), {}),  # noqa: E241
-    ("rindex", ("",), {}),  # noqa: E241
-    ("rjust", (10,), {}),  # noqa: E241
-    ("rpartition", (" ",), {"expand": False}),  # noqa: E241
-    ("rpartition", (" ",), {"expand": True}),  # noqa: E241
-    ("slice", (0, 1), {}),  # noqa: E241
-    ("slice_replace", (0, 1, "z"), {}),  # noqa: E241
-    ("split", (" ",), {"expand": False}),  # noqa: E241
-    ("split", (" ",), {"expand": True}),  # noqa: E241
-    ("startswith", ("a",), {}),  # noqa: E241
+    ("index", ("",), {}),
+    ("join", (",",), {}),
+    ("ljust", (10,), {}),
+    ("match", ("a",), {}),
+    ("normalize", ("NFC",), {}),
+    ("pad", (10,), {}),
+    ("partition", (" ",), {"expand": False}),
+    ("partition", (" ",), {"expand": True}),
+    ("repeat", (3,), {}),
+    ("replace", ("a", "z"), {}),
+    ("rfind", ("a",), {}),
+    ("rindex", ("",), {}),
+    ("rjust", (10,), {}),
+    ("rpartition", (" ",), {"expand": False}),
+    ("rpartition", (" ",), {"expand": True}),
+    ("slice", (0, 1), {}),
+    ("slice_replace", (0, 1, "z"), {}),
+    ("split", (" ",), {"expand": False}),
+    ("split", (" ",), {"expand": True}),
+    ("startswith", ("a",), {}),
     # translating unicode points of "a" to "d"
-    ("translate", ({97: 100},), {}),  # noqa: E241
-    ("wrap", (2,), {}),  # noqa: E241
-    ("zfill", (10,), {}),  # noqa: E241
+    ("translate", ({97: 100},), {}),
+    ("wrap", (2,), {}),
+    ("zfill", (10,), {}),
 ] + list(
     zip(
         [

From 567b23da5681cd5e55c5fc0583b8fec0e3201a5a Mon Sep 17 00:00:00 2001
From: Kaathi <karthigeyan@honeybadgerlabs.in>
Date: Fri, 25 Oct 2019 11:28:08 +0530
Subject: [PATCH 2/4] Removed all # noqa comments as we have Black formatting

---
 asv_bench/benchmarks/algorithms.py            |  2 +-
 asv_bench/benchmarks/attrs_caching.py         |  2 +-
 asv_bench/benchmarks/binary_ops.py            |  2 +-
 asv_bench/benchmarks/categoricals.py          |  2 +-
 asv_bench/benchmarks/ctors.py                 |  2 +-
 asv_bench/benchmarks/dtypes.py                |  2 +-
 asv_bench/benchmarks/eval.py                  |  2 +-
 asv_bench/benchmarks/frame_ctor.py            |  4 +-
 asv_bench/benchmarks/frame_methods.py         |  2 +-
 asv_bench/benchmarks/gil.py                   |  4 +-
 asv_bench/benchmarks/groupby.py               |  2 +-
 asv_bench/benchmarks/index_object.py          |  2 +-
 asv_bench/benchmarks/indexing.py              |  2 +-
 asv_bench/benchmarks/inference.py             |  2 +-
 asv_bench/benchmarks/io/csv.py                |  2 +-
 asv_bench/benchmarks/io/excel.py              |  2 +-
 asv_bench/benchmarks/io/hdf.py                |  2 +-
 asv_bench/benchmarks/io/json.py               |  2 +-
 asv_bench/benchmarks/io/msgpack.py            |  2 +-
 asv_bench/benchmarks/io/pickle.py             |  2 +-
 asv_bench/benchmarks/io/sql.py                |  2 +-
 asv_bench/benchmarks/io/stata.py              |  2 +-
 asv_bench/benchmarks/join_merge.py            |  2 +-
 asv_bench/benchmarks/multiindex_object.py     |  2 +-
 asv_bench/benchmarks/offset.py                |  2 +-
 asv_bench/benchmarks/plotting.py              |  2 +-
 asv_bench/benchmarks/reindex.py               |  2 +-
 asv_bench/benchmarks/replace.py               |  2 +-
 asv_bench/benchmarks/reshape.py               |  2 +-
 asv_bench/benchmarks/rolling.py               |  2 +-
 asv_bench/benchmarks/series_methods.py        |  2 +-
 asv_bench/benchmarks/sparse.py                |  2 +-
 asv_bench/benchmarks/stat_ops.py              |  2 +-
 asv_bench/benchmarks/timeseries.py            |  2 +-
 doc/make.py                                   |  2 +-
 doc/source/conf.py                            | 10 +--
 doc/source/development/extending.rst          |  2 +-
 doc/source/getting_started/10min.rst          |  2 +-
 doc/source/getting_started/dsintro.rst        |  2 +-
 doc/source/user_guide/advanced.rst            |  4 +-
 doc/source/user_guide/computation.rst         |  2 +-
 doc/source/user_guide/enhancingperf.rst       |  6 +-
 doc/source/user_guide/groupby.rst             |  2 +-
 doc/source/user_guide/io.rst                  |  2 +-
 doc/source/user_guide/missing_data.rst        |  2 +-
 doc/source/user_guide/visualization.rst       |  2 +-
 doc/source/whatsnew/v0.13.0.rst               | 10 +--
 doc/source/whatsnew/v0.13.1.rst               |  2 +-
 doc/source/whatsnew/v0.14.0.rst               |  4 +-
 doc/source/whatsnew/v0.15.0.rst               |  4 +-
 doc/source/whatsnew/v0.15.2.rst               |  2 +-
 doc/source/whatsnew/v0.16.2.rst               |  8 +-
 doc/source/whatsnew/v0.17.0.rst               |  2 +-
 doc/source/whatsnew/v0.17.1.rst               |  2 +-
 doc/source/whatsnew/v0.18.0.rst               |  2 +-
 doc/source/whatsnew/v0.19.1.rst               |  2 +-
 doc/source/whatsnew/v0.19.2.rst               |  2 +-
 doc/source/whatsnew/v0.20.2.rst               |  2 +-
 doc/source/whatsnew/v0.20.3.rst               |  2 +-
 doc/source/whatsnew/v0.21.0.rst               |  2 +-
 doc/source/whatsnew/v0.21.1.rst               |  2 +-
 doc/source/whatsnew/v0.22.0.rst               |  2 +-
 doc/source/whatsnew/v0.23.0.rst               |  2 +-
 doc/source/whatsnew/v0.5.0.rst                |  2 +-
 doc/source/whatsnew/v0.6.0.rst                |  2 +-
 doc/source/whatsnew/v0.7.3.rst                |  6 +-
 pandas/_config/__init__.py                    |  2 +-
 pandas/_libs/parsers.pyx                      |  2 +-
 pandas/_libs/tslib.pyx                        |  2 +-
 pandas/_libs/tslibs/nattype.pyx               | 38 ++++-----
 pandas/_typing.py                             | 12 +--
 pandas/api/__init__.py                        |  2 +-
 pandas/api/extensions/__init__.py             |  8 +-
 pandas/api/types/__init__.py                  |  8 +-
 pandas/compat/pickle_compat.py                |  2 +-
 pandas/core/accessor.py                       |  2 +-
 pandas/core/arrays/__init__.py                | 20 ++---
 pandas/core/common.py                         |  2 +-
 pandas/core/computation/pytables.py           |  2 +-
 pandas/core/dtypes/common.py                  |  2 +-
 pandas/core/groupby/__init__.py               |  6 +-
 pandas/core/groupby/generic.py                |  2 +-
 pandas/core/groupby/grouper.py                |  2 +-
 pandas/core/index.py                          |  4 +-
 pandas/core/indexes/accessors.py              |  2 +-
 pandas/core/indexes/api.py                    | 12 +--
 pandas/core/indexes/category.py               |  6 +-
 pandas/core/indexing.py                       |  4 +-
 pandas/core/internals/__init__.py             | 12 +--
 pandas/core/internals/concat.py               |  2 +-
 pandas/core/ops/__init__.py                   | 10 +--
 pandas/core/tools/datetimes.py                |  2 +-
 pandas/core/util/hashing.py                   |  2 +-
 pandas/core/window/__init__.py                |  6 +-
 pandas/core/window/common.py                  |  2 +-
 pandas/io/clipboard/__init__.py               |  6 +-
 pandas/io/common.py                           |  4 +-
 pandas/io/formats/console.py                  |  4 +-
 pandas/io/html.py                             |  2 +-
 pandas/io/msgpack/__init__.py                 | 10 +--
 pandas/io/packers.py                          |  4 +-
 pandas/io/parsers.py                          |  4 +-
 pandas/io/sas/__init__.py                     |  2 +-
 pandas/io/sql.py                              |  2 +-
 .../arrays/categorical/test_constructors.py   |  8 +-
 pandas/tests/arrays/categorical/test_repr.py  | 80 +++++++++----------
 pandas/tests/arrays/sparse/test_array.py      |  8 +-
 pandas/tests/arrays/sparse/test_libsparse.py  |  4 +-
 pandas/tests/computation/test_compat.py       |  2 +-
 pandas/tests/computation/test_eval.py         | 32 ++++----
 pandas/tests/dtypes/test_inference.py         |  4 +-
 pandas/tests/extension/base/__init__.py       | 28 +++----
 pandas/tests/frame/test_alter_axes.py         |  2 +-
 pandas/tests/frame/test_analytics.py          | 20 ++---
 .../tests/frame/test_axis_select_reindex.py   |  2 +-
 pandas/tests/frame/test_block_internals.py    | 12 +--
 pandas/tests/frame/test_constructors.py       |  8 +-
 pandas/tests/frame/test_dtypes.py             |  4 +-
 pandas/tests/frame/test_indexing.py           |  6 +-
 pandas/tests/frame/test_missing.py            |  4 +-
 pandas/tests/frame/test_query_eval.py         | 14 ++--
 pandas/tests/frame/test_repr_info.py          | 12 +--
 pandas/tests/frame/test_timeseries.py         |  8 +-
 .../tests/groupby/aggregate/test_aggregate.py |  2 +-
 pandas/tests/indexes/multi/test_format.py     |  2 +-
 pandas/tests/indexes/multi/test_sorting.py    |  2 +-
 pandas/tests/indexes/test_category.py         | 24 +++---
 pandas/tests/indexing/test_callable.py        | 12 +--
 pandas/tests/indexing/test_indexing.py        |  4 +-
 pandas/tests/io/formats/test_style.py         |  2 +-
 pandas/tests/io/json/test_normalize.py        |  2 +-
 pandas/tests/io/json/test_pandas.py           |  2 +-
 pandas/tests/io/msgpack/test_obj.py           |  2 +-
 pandas/tests/io/parser/test_common.py         |  4 +-
 pandas/tests/io/parser/test_index_col.py      |  4 +-
 pandas/tests/io/parser/test_read_fwf.py       |  2 +-
 pandas/tests/io/pytables/test_store.py        | 24 +++---
 pandas/tests/io/test_feather.py               |  2 +-
 pandas/tests/io/test_packers.py               |  4 +-
 pandas/tests/io/test_parquet.py               |  8 +-
 pandas/tests/io/test_sql.py                   |  6 +-
 pandas/tests/io/test_stata.py                 |  4 +-
 pandas/tests/plotting/test_boxplot_method.py  |  4 +-
 pandas/tests/plotting/test_frame.py           |  2 +-
 pandas/tests/plotting/test_hist_method.py     |  2 +-
 pandas/tests/resample/test_base.py            |  2 +-
 pandas/tests/reshape/test_melt.py             |  2 +-
 .../tests/scalar/timedelta/test_timedelta.py  |  4 +-
 .../tests/scalar/timestamp/test_rendering.py  |  2 +-
 .../tests/scalar/timestamp/test_timestamp.py  |  2 +-
 pandas/tests/series/indexing/test_datetime.py |  2 +-
 pandas/tests/series/indexing/test_indexing.py |  2 +-
 pandas/tests/series/test_missing.py           |  4 +-
 pandas/tests/series/test_repr.py              | 18 ++---
 pandas/tests/test_algos.py                    |  4 +-
 pandas/tests/test_base.py                     |  8 +-
 pandas/tests/test_common.py                   |  4 +-
 pandas/tests/test_downstream.py               | 22 ++---
 pandas/tests/test_errors.py                   |  2 +-
 pandas/tests/test_strings.py                  |  2 +-
 pandas/tests/util/test_hashing.py             |  4 +-
 pandas/tseries/holiday.py                     |  2 +-
 pandas/util/__init__.py                       |  4 +-
 pandas/util/_decorators.py                    |  2 +-
 pandas/util/_tester.py                        |  2 +-
 pandas/util/testing.py                        |  2 +-
 scripts/tests/test_validate_docstrings.py     |  2 +-
 scripts/validate_docstrings.py                | 14 ++--
 setup.py                                      |  4 +-
 169 files changed, 441 insertions(+), 441 deletions(-)

diff --git a/asv_bench/benchmarks/algorithms.py b/asv_bench/benchmarks/algorithms.py
index 7d97f2c740acb..eeecee14b0ddb 100644
--- a/asv_bench/benchmarks/algorithms.py
+++ b/asv_bench/benchmarks/algorithms.py
@@ -183,4 +183,4 @@ def time_argsort(self, N):
         self.array.argsort()
 
 
-from .pandas_vb_common import setup  # noqa: F401 isort:skip
+from .pandas_vb_common import setup
diff --git a/asv_bench/benchmarks/attrs_caching.py b/asv_bench/benchmarks/attrs_caching.py
index 501e27b9078ec..f2d3210d9bc63 100644
--- a/asv_bench/benchmarks/attrs_caching.py
+++ b/asv_bench/benchmarks/attrs_caching.py
@@ -33,4 +33,4 @@ def time_cache_readonly(self):
         self.obj.prop
 
 
-from .pandas_vb_common import setup  # noqa: F401 isort:skip
+from .pandas_vb_common import setup
diff --git a/asv_bench/benchmarks/binary_ops.py b/asv_bench/benchmarks/binary_ops.py
index 58e0db67d6025..98c1ba12b059c 100644
--- a/asv_bench/benchmarks/binary_ops.py
+++ b/asv_bench/benchmarks/binary_ops.py
@@ -156,4 +156,4 @@ def time_add_overflow_both_arg_nan(self):
         )
 
 
-from .pandas_vb_common import setup  # noqa: F401 isort:skip
+from .pandas_vb_common import setup
diff --git a/asv_bench/benchmarks/categoricals.py b/asv_bench/benchmarks/categoricals.py
index 4384ccb7fa8b3..01986876f1e62 100644
--- a/asv_bench/benchmarks/categoricals.py
+++ b/asv_bench/benchmarks/categoricals.py
@@ -296,4 +296,4 @@ def time_categorical_contains(self):
         self.c.searchsorted(self.key)
 
 
-from .pandas_vb_common import setup  # noqa: F401 isort:skip
+from .pandas_vb_common import setup
diff --git a/asv_bench/benchmarks/ctors.py b/asv_bench/benchmarks/ctors.py
index a9e45cad22d27..5262721ff8e96 100644
--- a/asv_bench/benchmarks/ctors.py
+++ b/asv_bench/benchmarks/ctors.py
@@ -114,4 +114,4 @@ def time_multiindex_from_iterables(self):
         MultiIndex.from_product(self.iterables)
 
 
-from .pandas_vb_common import setup  # noqa: F401 isort:skip
+from .pandas_vb_common import setup
diff --git a/asv_bench/benchmarks/dtypes.py b/asv_bench/benchmarks/dtypes.py
index 24cc1c6f9fa70..5201ece488a20 100644
--- a/asv_bench/benchmarks/dtypes.py
+++ b/asv_bench/benchmarks/dtypes.py
@@ -40,4 +40,4 @@ def time_pandas_dtype_invalid(self, dtype):
             pass
 
 
-from .pandas_vb_common import setup  # noqa: F401 isort:skip
+from .pandas_vb_common import setup
diff --git a/asv_bench/benchmarks/eval.py b/asv_bench/benchmarks/eval.py
index cbab9fdc9c0ba..5b66c4ad4bb17 100644
--- a/asv_bench/benchmarks/eval.py
+++ b/asv_bench/benchmarks/eval.py
@@ -63,4 +63,4 @@ def time_query_with_boolean_selection(self):
         self.df.query("(a >= @self.min_val) & (a <= @self.max_val)")
 
 
-from .pandas_vb_common import setup  # noqa: F401 isort:skip
+from .pandas_vb_common import setup
diff --git a/asv_bench/benchmarks/frame_ctor.py b/asv_bench/benchmarks/frame_ctor.py
index 3944e0bc523d8..4a5700878ce68 100644
--- a/asv_bench/benchmarks/frame_ctor.py
+++ b/asv_bench/benchmarks/frame_ctor.py
@@ -7,7 +7,7 @@
     from pandas.tseries.offsets import Nano, Hour
 except ImportError:
     # For compatibility with older versions
-    from pandas.core.datetools import *  # noqa
+    from pandas.core.datetools import *
 
 
 class FromDicts:
@@ -105,4 +105,4 @@ def time_frame_from_lists(self):
         self.df = DataFrame(self.data)
 
 
-from .pandas_vb_common import setup  # noqa: F401 isort:skip
+from .pandas_vb_common import setup
diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py
index eb9a0e83271f1..96fd7c66aa072 100644
--- a/asv_bench/benchmarks/frame_methods.py
+++ b/asv_bench/benchmarks/frame_methods.py
@@ -620,4 +620,4 @@ def time_select_dtypes(self, n):
         self.df.select_dtypes(include="int")
 
 
-from .pandas_vb_common import setup  # noqa: F401 isort:skip
+from .pandas_vb_common import setup
diff --git a/asv_bench/benchmarks/gil.py b/asv_bench/benchmarks/gil.py
index d57492dd37268..5aeffae040b08 100644
--- a/asv_bench/benchmarks/gil.py
+++ b/asv_bench/benchmarks/gil.py
@@ -37,7 +37,7 @@ def wrapper(fname):
         return wrapper
 
 
-from .pandas_vb_common import BaseIO  # noqa: E402 isort:skip
+from .pandas_vb_common import BaseIO
 
 
 class ParallelGroupbyMethods:
@@ -302,4 +302,4 @@ def time_loop(self, threads):
             self.loop()
 
 
-from .pandas_vb_common import setup  # noqa: F401 isort:skip
+from .pandas_vb_common import setup
diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py
index d51c53e2264f1..92efcf9b49718 100644
--- a/asv_bench/benchmarks/groupby.py
+++ b/asv_bench/benchmarks/groupby.py
@@ -625,4 +625,4 @@ def time_first(self):
         self.df_nans.groupby("key").transform("first")
 
 
-from .pandas_vb_common import setup  # noqa: F401 isort:skip
+from .pandas_vb_common import setup
diff --git a/asv_bench/benchmarks/index_object.py b/asv_bench/benchmarks/index_object.py
index a94960d494707..0c45c5969d480 100644
--- a/asv_bench/benchmarks/index_object.py
+++ b/asv_bench/benchmarks/index_object.py
@@ -245,4 +245,4 @@ def peakmem_gc_instances(self, N):
             gc.enable()
 
 
-from .pandas_vb_common import setup  # noqa: F401 isort:skip
+from .pandas_vb_common import setup
diff --git a/asv_bench/benchmarks/indexing.py b/asv_bench/benchmarks/indexing.py
index ac35139c1954a..c816ea271a0cf 100644
--- a/asv_bench/benchmarks/indexing.py
+++ b/asv_bench/benchmarks/indexing.py
@@ -372,4 +372,4 @@ def time_chained_indexing(self, mode):
                 df2["C"] = 1.0
 
 
-from .pandas_vb_common import setup  # noqa: F401 isort:skip
+from .pandas_vb_common import setup
diff --git a/asv_bench/benchmarks/inference.py b/asv_bench/benchmarks/inference.py
index e85b3bd2c7687..ccc6eef574b2b 100644
--- a/asv_bench/benchmarks/inference.py
+++ b/asv_bench/benchmarks/inference.py
@@ -121,4 +121,4 @@ def time_convert(self, data):
         lib.maybe_convert_numeric(data, set(), coerce_numeric=False)
 
 
-from .pandas_vb_common import setup  # noqa: F401 isort:skip
+from .pandas_vb_common import setup
diff --git a/asv_bench/benchmarks/io/csv.py b/asv_bench/benchmarks/io/csv.py
index 9b8599b0a1b64..1b5a444a34606 100644
--- a/asv_bench/benchmarks/io/csv.py
+++ b/asv_bench/benchmarks/io/csv.py
@@ -407,4 +407,4 @@ def time_to_datetime_format_DD_MM_YYYY(self, cache_dates):
         to_datetime(df["date"], cache=cache_dates, format="%d-%m-%Y")
 
 
-from ..pandas_vb_common import setup  # noqa: F401 isort:skip
+from ..pandas_vb_common import setup
diff --git a/asv_bench/benchmarks/io/excel.py b/asv_bench/benchmarks/io/excel.py
index c97cf768e27d9..da3f3a9e4da97 100644
--- a/asv_bench/benchmarks/io/excel.py
+++ b/asv_bench/benchmarks/io/excel.py
@@ -69,4 +69,4 @@ def time_read_excel(self, engine):
         read_excel(fname, engine=engine)
 
 
-from ..pandas_vb_common import setup  # noqa: F401 isort:skip
+from ..pandas_vb_common import setup
diff --git a/asv_bench/benchmarks/io/hdf.py b/asv_bench/benchmarks/io/hdf.py
index b78dc63d17130..6d084bac91dd8 100644
--- a/asv_bench/benchmarks/io/hdf.py
+++ b/asv_bench/benchmarks/io/hdf.py
@@ -128,4 +128,4 @@ def time_write_hdf(self, format):
         self.df.to_hdf(self.fname, "df", format=format)
 
 
-from ..pandas_vb_common import setup  # noqa: F401 isort:skip
+from ..pandas_vb_common import setup
diff --git a/asv_bench/benchmarks/io/json.py b/asv_bench/benchmarks/io/json.py
index 5c1d39776b91c..0e8dae1a0234c 100644
--- a/asv_bench/benchmarks/io/json.py
+++ b/asv_bench/benchmarks/io/json.py
@@ -215,4 +215,4 @@ def peakmem_float(self, frames):
             df.to_json()
 
 
-from ..pandas_vb_common import setup  # noqa: F401 isort:skip
+from ..pandas_vb_common import setup
diff --git a/asv_bench/benchmarks/io/msgpack.py b/asv_bench/benchmarks/io/msgpack.py
index f5038602539ab..97c1caf97ed0c 100644
--- a/asv_bench/benchmarks/io/msgpack.py
+++ b/asv_bench/benchmarks/io/msgpack.py
@@ -29,4 +29,4 @@ def time_write_msgpack(self):
         self.df.to_msgpack(self.fname)
 
 
-from ..pandas_vb_common import setup  # noqa: F401 isort:skip
+from ..pandas_vb_common import setup
diff --git a/asv_bench/benchmarks/io/pickle.py b/asv_bench/benchmarks/io/pickle.py
index 647e9d27dec9d..e2b53e04332fa 100644
--- a/asv_bench/benchmarks/io/pickle.py
+++ b/asv_bench/benchmarks/io/pickle.py
@@ -26,4 +26,4 @@ def time_write_pickle(self):
         self.df.to_pickle(self.fname)
 
 
-from ..pandas_vb_common import setup  # noqa: F401 isort:skip
+from ..pandas_vb_common import setup
diff --git a/asv_bench/benchmarks/io/sql.py b/asv_bench/benchmarks/io/sql.py
index fe84c869717e3..32945f1d4aa60 100644
--- a/asv_bench/benchmarks/io/sql.py
+++ b/asv_bench/benchmarks/io/sql.py
@@ -142,4 +142,4 @@ def time_read_sql_table_column(self, dtype):
         read_sql_table(self.table_name, self.con, columns=[dtype])
 
 
-from ..pandas_vb_common import setup  # noqa: F401 isort:skip
+from ..pandas_vb_common import setup
diff --git a/asv_bench/benchmarks/io/stata.py b/asv_bench/benchmarks/io/stata.py
index 28829785d72e9..e57b60eec396f 100644
--- a/asv_bench/benchmarks/io/stata.py
+++ b/asv_bench/benchmarks/io/stata.py
@@ -51,4 +51,4 @@ def setup(self, convert_dates):
         self.df.to_stata(self.fname, self.convert_dates)
 
 
-from ..pandas_vb_common import setup  # noqa: F401 isort:skip
+from ..pandas_vb_common import setup
diff --git a/asv_bench/benchmarks/join_merge.py b/asv_bench/benchmarks/join_merge.py
index 5cf9f6336ba0c..a7cf33a764408 100644
--- a/asv_bench/benchmarks/join_merge.py
+++ b/asv_bench/benchmarks/join_merge.py
@@ -382,4 +382,4 @@ def time_series_align_left_monotonic(self):
         self.ts1.align(self.ts2, join="left")
 
 
-from .pandas_vb_common import setup  # noqa: F401 isort:skip
+from .pandas_vb_common import setup
diff --git a/asv_bench/benchmarks/multiindex_object.py b/asv_bench/benchmarks/multiindex_object.py
index 3f4fd7ad911c1..c0294b54ad9d3 100644
--- a/asv_bench/benchmarks/multiindex_object.py
+++ b/asv_bench/benchmarks/multiindex_object.py
@@ -147,4 +147,4 @@ def time_categorical_level(self):
         self.df.set_index(["a", "b"])
 
 
-from .pandas_vb_common import setup  # noqa: F401 isort:skip
+from .pandas_vb_common import setup
diff --git a/asv_bench/benchmarks/offset.py b/asv_bench/benchmarks/offset.py
index d822646e712ae..129da744ab9ba 100644
--- a/asv_bench/benchmarks/offset.py
+++ b/asv_bench/benchmarks/offset.py
@@ -6,7 +6,7 @@
 import pandas as pd
 
 try:
-    import pandas.tseries.holiday  # noqa
+    import pandas.tseries.holiday
 except ImportError:
     pass
 
diff --git a/asv_bench/benchmarks/plotting.py b/asv_bench/benchmarks/plotting.py
index 5c718516360ed..d854cb827c458 100644
--- a/asv_bench/benchmarks/plotting.py
+++ b/asv_bench/benchmarks/plotting.py
@@ -94,4 +94,4 @@ def time_plot_andrews_curves(self):
         andrews_curves(self.df, "Name")
 
 
-from .pandas_vb_common import setup  # noqa: F401 isort:skip
+from .pandas_vb_common import setup
diff --git a/asv_bench/benchmarks/reindex.py b/asv_bench/benchmarks/reindex.py
index cd450f801c805..30c00fccd579e 100644
--- a/asv_bench/benchmarks/reindex.py
+++ b/asv_bench/benchmarks/reindex.py
@@ -161,4 +161,4 @@ def time_lib_fast_zip(self):
         lib.fast_zip(self.col_array_list)
 
 
-from .pandas_vb_common import setup  # noqa: F401 isort:skip
+from .pandas_vb_common import setup
diff --git a/asv_bench/benchmarks/replace.py b/asv_bench/benchmarks/replace.py
index 2a115fb0b4fe3..d4f3817178dca 100644
--- a/asv_bench/benchmarks/replace.py
+++ b/asv_bench/benchmarks/replace.py
@@ -74,4 +74,4 @@ def time_replace(self, constructor, replace_data):
         self.data.replace(self.to_replace)
 
 
-from .pandas_vb_common import setup  # noqa: F401 isort:skip
+from .pandas_vb_common import setup
diff --git a/asv_bench/benchmarks/reshape.py b/asv_bench/benchmarks/reshape.py
index 441f4b380656e..13aab6f1a6263 100644
--- a/asv_bench/benchmarks/reshape.py
+++ b/asv_bench/benchmarks/reshape.py
@@ -263,4 +263,4 @@ def time_explode(self, n_rows, max_list_length):
         self.series.explode()
 
 
-from .pandas_vb_common import setup  # noqa: F401 isort:skip
+from .pandas_vb_common import setup
diff --git a/asv_bench/benchmarks/rolling.py b/asv_bench/benchmarks/rolling.py
index 493f96d46d5e7..5a87da81306d4 100644
--- a/asv_bench/benchmarks/rolling.py
+++ b/asv_bench/benchmarks/rolling.py
@@ -144,4 +144,4 @@ def peakmem_fixed(self):
             self.roll.max()
 
 
-from .pandas_vb_common import setup  # noqa: F401 isort:skip
+from .pandas_vb_common import setup
diff --git a/asv_bench/benchmarks/series_methods.py b/asv_bench/benchmarks/series_methods.py
index a3f1d92545c3f..42ffcbc708335 100644
--- a/asv_bench/benchmarks/series_methods.py
+++ b/asv_bench/benchmarks/series_methods.py
@@ -276,4 +276,4 @@ def time_func(self, func, N, dtype):
         self.func()
 
 
-from .pandas_vb_common import setup  # noqa: F401 isort:skip
+from .pandas_vb_common import setup
diff --git a/asv_bench/benchmarks/sparse.py b/asv_bench/benchmarks/sparse.py
index ac78ca53679fd..fa134d7cc4c75 100644
--- a/asv_bench/benchmarks/sparse.py
+++ b/asv_bench/benchmarks/sparse.py
@@ -136,4 +136,4 @@ def time_division(self, fill_value):
         self.arr1 / self.arr2
 
 
-from .pandas_vb_common import setup  # noqa: F401 isort:skip
+from .pandas_vb_common import setup
diff --git a/asv_bench/benchmarks/stat_ops.py b/asv_bench/benchmarks/stat_ops.py
index ed5ebfa61594e..3154151e30bde 100644
--- a/asv_bench/benchmarks/stat_ops.py
+++ b/asv_bench/benchmarks/stat_ops.py
@@ -159,4 +159,4 @@ def time_cov_series(self, use_bottleneck):
         self.s.cov(self.s2)
 
 
-from .pandas_vb_common import setup  # noqa: F401 isort:skip
+from .pandas_vb_common import setup
diff --git a/asv_bench/benchmarks/timeseries.py b/asv_bench/benchmarks/timeseries.py
index 498774034d642..0d597d80d0116 100644
--- a/asv_bench/benchmarks/timeseries.py
+++ b/asv_bench/benchmarks/timeseries.py
@@ -428,4 +428,4 @@ def time_dt_accessor_year(self, tz):
         self.series.dt.year
 
 
-from .pandas_vb_common import setup  # noqa: F401 isort:skip
+from .pandas_vb_common import setup
diff --git a/doc/make.py b/doc/make.py
index cbb1fa6a5324a..a515139f580ad 100755
--- a/doc/make.py
+++ b/doc/make.py
@@ -80,7 +80,7 @@ def _process_single_doc(self, single_doc):
 
         elif single_doc.startswith("pandas."):
             try:
-                obj = pandas  # noqa: F821
+                obj = pandas
                 for name in single_doc.split("."):
                     obj = getattr(obj, name)
             except AttributeError:
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 13d3324caf249..4f5f8e42a986a 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -144,7 +144,7 @@
 # built documents.
 #
 # The short X.Y version.
-import pandas  # noqa: E402 isort:skip
+import pandas
 
 # version = '%s r%s' % (pandas.__version__, svn_version())
 version = str(pandas.__version__)
@@ -435,14 +435,14 @@
 # Add custom Documenter to handle attributes/methods of an AccessorProperty
 # eg pandas.Series.str and pandas.Series.dt (see GH9322)
 
-import sphinx  # noqa: E402 isort:skip
-from sphinx.util import rpartition  # noqa: E402 isort:skip
-from sphinx.ext.autodoc import (  # noqa: E402 isort:skip
+import sphinx
+from sphinx.util import rpartition
+from sphinx.ext.autodoc import (
     AttributeDocumenter,
     Documenter,
     MethodDocumenter,
 )
-from sphinx.ext.autosummary import Autosummary  # noqa: E402 isort:skip
+from sphinx.ext.autosummary import Autosummary
 
 
 class AccessorDocumenter(MethodDocumenter):
diff --git a/doc/source/development/extending.rst b/doc/source/development/extending.rst
index e341dcb8318bc..34498bbbbec1a 100644
--- a/doc/source/development/extending.rst
+++ b/doc/source/development/extending.rst
@@ -448,7 +448,7 @@ registers the default "matplotlib" backend as follows.
 .. code-block:: python
 
    # in setup.py
-   setup(  # noqa: F821
+   setup(
        ...,
        entry_points={
            "pandas_plotting_backends": [
diff --git a/doc/source/getting_started/10min.rst b/doc/source/getting_started/10min.rst
index 41520795bde62..12e524adbd414 100644
--- a/doc/source/getting_started/10min.rst
+++ b/doc/source/getting_started/10min.rst
@@ -65,7 +65,7 @@ will be completed:
 .. ipython::
 
    @verbatim
-   In [1]: df2.<TAB>  # noqa: E225, E999
+   In [1]: df2.<TAB>
    df2.A                  df2.bool
    df2.abs                df2.boxplot
    df2.add                df2.C
diff --git a/doc/source/getting_started/dsintro.rst b/doc/source/getting_started/dsintro.rst
index 9e18951fe3f4c..402b8149bf10d 100644
--- a/doc/source/getting_started/dsintro.rst
+++ b/doc/source/getting_started/dsintro.rst
@@ -882,5 +882,5 @@ completion mechanism so they can be tab-completed:
 
 .. code-block:: ipython
 
-    In [5]: df.fo<TAB>  # noqa: E225, E999
+    In [5]: df.fo<TAB>
     df.foo1  df.foo2
diff --git a/doc/source/user_guide/advanced.rst b/doc/source/user_guide/advanced.rst
index c6eadd2adadce..0c5433d2c5018 100644
--- a/doc/source/user_guide/advanced.rst
+++ b/doc/source/user_guide/advanced.rst
@@ -327,13 +327,13 @@ As usual, **both sides** of the slicers are included as this is label indexing.
 
    .. code-block:: python
 
-      df.loc[(slice('A1', 'A3'), ...), :]             # noqa: E999
+      df.loc[(slice('A1', 'A3'), ...), :]
 
    You should **not** do this:
  
    .. code-block:: python
 
-      df.loc[(slice('A1', 'A3'), ...)]                # noqa: E999
+      df.loc[(slice('A1', 'A3'), ...)]
 
 .. ipython:: python
 
diff --git a/doc/source/user_guide/computation.rst b/doc/source/user_guide/computation.rst
index bc00cd7f13e13..fc8bd9231ca5a 100644
--- a/doc/source/user_guide/computation.rst
+++ b/doc/source/user_guide/computation.rst
@@ -250,7 +250,7 @@ These object provide tab-completion of the available methods and properties.
 
 .. code-block:: ipython
 
-   In [14]: r.<TAB>                                          # noqa: E225, E999
+   In [14]: r.<TAB>
    r.agg         r.apply       r.count       r.exclusions  r.max         r.median      r.name        r.skew        r.sum
    r.aggregate   r.corr        r.cov         r.kurt        r.mean        r.min         r.quantile    r.std         r.var
 
diff --git a/doc/source/user_guide/enhancingperf.rst b/doc/source/user_guide/enhancingperf.rst
index 2df5b9d82dcc3..4736abcb47050 100644
--- a/doc/source/user_guide/enhancingperf.rst
+++ b/doc/source/user_guide/enhancingperf.rst
@@ -73,7 +73,7 @@ four calls) using the `prun ipython magic function <http://ipython.org/ipython-d
 
 .. ipython:: python
 
-   %prun -l 4 df.apply(lambda x: integrate_f(x['a'], x['b'], x['N']), axis=1)  # noqa E999
+   %prun -l 4 df.apply(lambda x: integrate_f(x['a'], x['b'], x['N']), axis=1)
 
 By far the majority of time is spend inside either ``integrate_f`` or ``f``,
 hence we'll concentrate our efforts cythonizing these two functions.
@@ -387,13 +387,13 @@ Consider the following toy example of doubling each observation:
 
 
    @numba.vectorize
-   def double_every_value_withnumba(x):  # noqa E501
+   def double_every_value_withnumba(x):
        return x * 2
 
 .. code-block:: ipython
 
    # Custom function without numba
-   In [5]: %timeit df['col1_doubled'] = df['a'].apply(double_every_value_nonumba)  # noqa E501
+   In [5]: %timeit df['col1_doubled'] = df['a'].apply(double_every_value_nonumba)
    1000 loops, best of 3: 797 us per loop
 
    # Standard implementation (faster than a custom function)
diff --git a/doc/source/user_guide/groupby.rst b/doc/source/user_guide/groupby.rst
index 8cd229070e365..dd2f738d56428 100644
--- a/doc/source/user_guide/groupby.rst
+++ b/doc/source/user_guide/groupby.rst
@@ -248,7 +248,7 @@ the length of the ``groups`` dict, so it is largely just a convenience:
 .. ipython::
 
    @verbatim
-   In [1]: gb.<TAB>  # noqa: E225, E999
+   In [1]: gb.<TAB>
    gb.agg        gb.boxplot    gb.cummin     gb.describe   gb.filter     gb.get_group  gb.height     gb.last       gb.median     gb.ngroups    gb.plot       gb.rank       gb.std        gb.transform
    gb.aggregate  gb.count      gb.cumprod    gb.dtype      gb.first      gb.groups     gb.hist       gb.max        gb.min        gb.nth        gb.prod       gb.resample   gb.sum        gb.var
    gb.apply      gb.cummax     gb.cumsum     gb.fillna     gb.gender     gb.head       gb.indices    gb.mean       gb.name       gb.ohlc       gb.quantile   gb.size       gb.tail       gb.weight
diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index 173bcf7537154..025b03762a7f6 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -3191,7 +3191,7 @@ argument to ``to_excel`` and to ``ExcelWriter``. The built-in engines are:
    writer = pd.ExcelWriter('path_to_file.xlsx', engine='xlsxwriter')
 
    # Or via pandas configuration.
-   from pandas import options  # noqa: E402
+   from pandas import options
    options.io.excel.xlsx.writer = 'xlsxwriter'
 
    df.to_excel('path_to_file.xlsx', sheet_name='Sheet1')
diff --git a/doc/source/user_guide/missing_data.rst b/doc/source/user_guide/missing_data.rst
index 6c36a6470f841..c7daee74ebd32 100644
--- a/doc/source/user_guide/missing_data.rst
+++ b/doc/source/user_guide/missing_data.rst
@@ -65,7 +65,7 @@ Series and DataFrame objects:
 
    .. ipython:: python
 
-      None == None                                                 # noqa: E711
+      None == None
       np.nan == np.nan
 
    So as compared to above, a scalar equality comparison versus a ``None/np.nan`` doesn't provide useful information.
diff --git a/doc/source/user_guide/visualization.rst b/doc/source/user_guide/visualization.rst
index 609969b666726..541b00160b9ea 100644
--- a/doc/source/user_guide/visualization.rst
+++ b/doc/source/user_guide/visualization.rst
@@ -129,7 +129,7 @@ You can also create these other plots using the methods ``DataFrame.plot.<kind>`
 
     In [14]: df = pd.DataFrame()
 
-    In [15]: df.plot.<TAB>  # noqa: E225, E999
+    In [15]: df.plot.<TAB>
     df.plot.area     df.plot.barh     df.plot.density  df.plot.hist     df.plot.line     df.plot.scatter
     df.plot.bar      df.plot.box      df.plot.hexbin   df.plot.kde      df.plot.pie
 
diff --git a/doc/source/whatsnew/v0.13.0.rst b/doc/source/whatsnew/v0.13.0.rst
index ab48594ddadab..421fd32c81f40 100644
--- a/doc/source/whatsnew/v0.13.0.rst
+++ b/doc/source/whatsnew/v0.13.0.rst
@@ -776,12 +776,12 @@ Experimental
      :suppress:
 
      try:
-         del a  # noqa: F821
+         del a
      except NameError:
          pass
 
      try:
-         del b  # noqa: F821
+         del b
      except NameError:
          pass
 
@@ -798,17 +798,17 @@ Experimental
      :suppress:
 
      try:
-         del a  # noqa: F821
+         del a
      except NameError:
          pass
 
      try:
-         del b  # noqa: F821
+         del b
      except NameError:
          pass
 
      try:
-         del c  # noqa: F821
+         del c
      except NameError:
          pass
 
diff --git a/doc/source/whatsnew/v0.13.1.rst b/doc/source/whatsnew/v0.13.1.rst
index 6242c40d44bf8..863275787fc73 100644
--- a/doc/source/whatsnew/v0.13.1.rst
+++ b/doc/source/whatsnew/v0.13.1.rst
@@ -307,7 +307,7 @@ Enhancements
       Major_axis axis: 2000-01-03 00:00:00 to 2000-01-07 00:00:00
       Minor_axis axis: A to D
 
-      In [37]: result['ItemA']                           # noqa E999
+      In [37]: result['ItemA']
       Out[37]:
                         A         B         C         D
       2000-01-03 -0.535778  1.500802 -1.506416 -0.681456
diff --git a/doc/source/whatsnew/v0.14.0.rst b/doc/source/whatsnew/v0.14.0.rst
index 25a75492d78fb..095e6f9e09b22 100644
--- a/doc/source/whatsnew/v0.14.0.rst
+++ b/doc/source/whatsnew/v0.14.0.rst
@@ -500,13 +500,13 @@ See also issues (:issue:`6134`, :issue:`4036`, :issue:`3057`, :issue:`2598`, :is
 
   .. code-block:: python
 
-     >>> df.loc[(slice('A1', 'A3'), ...), :]  # noqa: E901
+     >>> df.loc[(slice('A1', 'A3'), ...), :]
 
    rather than this:
 
   .. code-block:: python
 
-     >>> df.loc[(slice('A1', 'A3'), ...)]  # noqa: E901
+     >>> df.loc[(slice('A1', 'A3'), ...)]
 
 .. warning::
 
diff --git a/doc/source/whatsnew/v0.15.0.rst b/doc/source/whatsnew/v0.15.0.rst
index c27ada6ef3b58..1ae6bb99bdc38 100644
--- a/doc/source/whatsnew/v0.15.0.rst
+++ b/doc/source/whatsnew/v0.15.0.rst
@@ -612,8 +612,8 @@ Improvements in the sql io module
 
   .. code-block:: python
 
-         df.to_sql('table', engine, schema='other_schema')  # noqa F821
-         pd.read_sql_table('table', engine, schema='other_schema')  # noqa F821
+         df.to_sql('table', engine, schema='other_schema')
+         pd.read_sql_table('table', engine, schema='other_schema')
 
 - Added support for writing ``NaN`` values with ``to_sql`` (:issue:`2754`).
 - Added support for writing datetime64 columns with ``to_sql`` for all database flavors (:issue:`7103`).
diff --git a/doc/source/whatsnew/v0.15.2.rst b/doc/source/whatsnew/v0.15.2.rst
index b58eabaed6127..682d3b57e2b2c 100644
--- a/doc/source/whatsnew/v0.15.2.rst
+++ b/doc/source/whatsnew/v0.15.2.rst
@@ -149,7 +149,7 @@ Other enhancements:
   .. code-block:: python
 
      from sqlalchemy.types import String
-     data.to_sql('data_dtype', engine, dtype={'Col_1': String})  # noqa F821
+     data.to_sql('data_dtype', engine, dtype={'Col_1': String})
 
 - ``Series.all`` and ``Series.any`` now support the ``level`` and ``skipna`` parameters (:issue:`8302`):
 
diff --git a/doc/source/whatsnew/v0.16.2.rst b/doc/source/whatsnew/v0.16.2.rst
index 543f9c6bbf300..20bdc697138e9 100644
--- a/doc/source/whatsnew/v0.16.2.rst
+++ b/doc/source/whatsnew/v0.16.2.rst
@@ -41,16 +41,16 @@ The goal is to avoid confusing nested function calls like
 
    # df is a DataFrame
    # f, g, and h are functions that take and return DataFrames
-   f(g(h(df), arg1=1), arg2=2, arg3=3)  # noqa F821
+   f(g(h(df), arg1=1), arg2=2, arg3=3)
 
 The logic flows from inside out, and function names are separated from their keyword arguments.
 This can be rewritten as
 
 .. code-block:: python
 
-   (df.pipe(h)                   # noqa F821
-      .pipe(g, arg1=1)           # noqa F821
-      .pipe(f, arg2=2, arg3=3)   # noqa F821
+   (df.pipe(h)
+      .pipe(g, arg1=1)
+      .pipe(f, arg2=2, arg3=3)
    )
 
 Now both the code and the logic flow from top to bottom. Keyword arguments are next to
diff --git a/doc/source/whatsnew/v0.17.0.rst b/doc/source/whatsnew/v0.17.0.rst
index 67abad659dc8d..1a93768527a7b 100644
--- a/doc/source/whatsnew/v0.17.0.rst
+++ b/doc/source/whatsnew/v0.17.0.rst
@@ -170,7 +170,7 @@ As a result of this change, these methods are now all discoverable via tab-compl
 .. ipython::
     :verbatim:
 
-    In [15]: df.plot.<TAB>  # noqa: E225, E999
+    In [15]: df.plot.<TAB>
     df.plot.area     df.plot.barh     df.plot.density  df.plot.hist     df.plot.line     df.plot.scatter
     df.plot.bar      df.plot.box      df.plot.hexbin   df.plot.kde      df.plot.pie
 
diff --git a/doc/source/whatsnew/v0.17.1.rst b/doc/source/whatsnew/v0.17.1.rst
index 55080240f2a55..2372a18f8ea06 100644
--- a/doc/source/whatsnew/v0.17.1.rst
+++ b/doc/source/whatsnew/v0.17.1.rst
@@ -80,7 +80,7 @@ Enhancements
 
   .. ipython:: python
 
-     df = pd.DataFrame({'A': ['foo'] * 1000})  # noqa: F821
+     df = pd.DataFrame({'A': ['foo'] * 1000})
      df['B'] = df['A'].astype('category')
 
      # shows the '+' as we have object dtypes
diff --git a/doc/source/whatsnew/v0.18.0.rst b/doc/source/whatsnew/v0.18.0.rst
index a7174c6325f86..c25e8bbb245df 100644
--- a/doc/source/whatsnew/v0.18.0.rst
+++ b/doc/source/whatsnew/v0.18.0.rst
@@ -97,7 +97,7 @@ with tab-completion of available methods and properties.
 
 .. code-block:: ipython
 
-   In [9]: r.<TAB>  # noqa E225, E999
+   In [9]: r.<TAB>
    r.A           r.agg         r.apply       r.count       r.exclusions  r.max         r.median      r.name        r.skew        r.sum
    r.B           r.aggregate   r.corr        r.cov         r.kurt        r.mean        r.min         r.quantile    r.std         r.var
 
diff --git a/doc/source/whatsnew/v0.19.1.rst b/doc/source/whatsnew/v0.19.1.rst
index a89d1461073bd..7f8918d7fd21f 100644
--- a/doc/source/whatsnew/v0.19.1.rst
+++ b/doc/source/whatsnew/v0.19.1.rst
@@ -8,7 +8,7 @@ v0.19.1 (November 3, 2016)
 .. ipython:: python
    :suppress:
 
-   from pandas import * # noqa F401, F403
+   from pandas import *
 
 
 This is a minor bug-fix release from 0.19.0 and includes some small regression fixes,
diff --git a/doc/source/whatsnew/v0.19.2.rst b/doc/source/whatsnew/v0.19.2.rst
index 023bc78081ec9..3ee9de5b37e59 100644
--- a/doc/source/whatsnew/v0.19.2.rst
+++ b/doc/source/whatsnew/v0.19.2.rst
@@ -8,7 +8,7 @@ v0.19.2 (December 24, 2016)
 .. ipython:: python
    :suppress:
 
-   from pandas import * # noqa F401, F403
+   from pandas import *
 
 
 This is a minor bug-fix release in the 0.19.x series and includes some small regression fixes,
diff --git a/doc/source/whatsnew/v0.20.2.rst b/doc/source/whatsnew/v0.20.2.rst
index 232d1d283d9bd..5bb1346000699 100644
--- a/doc/source/whatsnew/v0.20.2.rst
+++ b/doc/source/whatsnew/v0.20.2.rst
@@ -8,7 +8,7 @@ v0.20.2 (June 4, 2017)
 .. ipython:: python
    :suppress:
 
-   from pandas import * # noqa F401, F403
+   from pandas import *
 
 
 This is a minor bug-fix release in the 0.20.x series and includes some small regression fixes,
diff --git a/doc/source/whatsnew/v0.20.3.rst b/doc/source/whatsnew/v0.20.3.rst
index 72faabd95bf1f..0d098133f21b5 100644
--- a/doc/source/whatsnew/v0.20.3.rst
+++ b/doc/source/whatsnew/v0.20.3.rst
@@ -8,7 +8,7 @@ v0.20.3 (July 7, 2017)
 .. ipython:: python
    :suppress:
 
-   from pandas import * # noqa F401, F403
+   from pandas import *
 
 
 This is a minor bug-fix release in the 0.20.x series and includes some small regression fixes
diff --git a/doc/source/whatsnew/v0.21.0.rst b/doc/source/whatsnew/v0.21.0.rst
index 34b610e8af0b3..4417d0ec21a4b 100644
--- a/doc/source/whatsnew/v0.21.0.rst
+++ b/doc/source/whatsnew/v0.21.0.rst
@@ -8,7 +8,7 @@ v0.21.0 (October 27, 2017)
 .. ipython:: python
    :suppress:
 
-   from pandas import * # noqa F401, F403
+   from pandas import *
 
 
 This is a major release from 0.20.3 and includes a number of API changes, deprecations, new features,
diff --git a/doc/source/whatsnew/v0.21.1.rst b/doc/source/whatsnew/v0.21.1.rst
index 64f3339834b38..26ed1fdcc4de7 100644
--- a/doc/source/whatsnew/v0.21.1.rst
+++ b/doc/source/whatsnew/v0.21.1.rst
@@ -8,7 +8,7 @@ v0.21.1 (December 12, 2017)
 .. ipython:: python
    :suppress:
 
-   from pandas import * # noqa F401, F403
+   from pandas import *
 
 
 This is a minor bug-fix release in the 0.21.x series and includes some small regression fixes,
diff --git a/doc/source/whatsnew/v0.22.0.rst b/doc/source/whatsnew/v0.22.0.rst
index ea36b35d61740..fc8679a1ef4a9 100644
--- a/doc/source/whatsnew/v0.22.0.rst
+++ b/doc/source/whatsnew/v0.22.0.rst
@@ -8,7 +8,7 @@ v0.22.0 (December 29, 2017)
 .. ipython:: python
    :suppress:
 
-   from pandas import * # noqa F401, F403
+   from pandas import *
 
 
 This is a major release from 0.21.1 and includes a single, API-breaking change.
diff --git a/doc/source/whatsnew/v0.23.0.rst b/doc/source/whatsnew/v0.23.0.rst
index f4c283ea742f7..9a0f2f7f023f4 100644
--- a/doc/source/whatsnew/v0.23.0.rst
+++ b/doc/source/whatsnew/v0.23.0.rst
@@ -8,7 +8,7 @@ What's new in 0.23.0 (May 15, 2018)
 .. ipython:: python
    :suppress:
 
-   from pandas import * # noqa F401, F403
+   from pandas import *
 
 
 This is a major release from 0.22.0 and includes a number of API changes,
diff --git a/doc/source/whatsnew/v0.5.0.rst b/doc/source/whatsnew/v0.5.0.rst
index 37c52ac7bb34e..22dcc094fbceb 100644
--- a/doc/source/whatsnew/v0.5.0.rst
+++ b/doc/source/whatsnew/v0.5.0.rst
@@ -9,7 +9,7 @@ v.0.5.0 (October 24, 2011)
 .. ipython:: python
    :suppress:
 
-   from pandas import * # noqa F401, F403
+   from pandas import *
 
 
 New features
diff --git a/doc/source/whatsnew/v0.6.0.rst b/doc/source/whatsnew/v0.6.0.rst
index 973ba897b3234..99f71da6edca9 100644
--- a/doc/source/whatsnew/v0.6.0.rst
+++ b/doc/source/whatsnew/v0.6.0.rst
@@ -8,7 +8,7 @@ v.0.6.0 (November 25, 2011)
 .. ipython:: python
    :suppress:
 
-   from pandas import * # noqa F401, F403
+   from pandas import *
 
 
 New features
diff --git a/doc/source/whatsnew/v0.7.3.rst b/doc/source/whatsnew/v0.7.3.rst
index 020cf3bdc2d59..f0278e0089256 100644
--- a/doc/source/whatsnew/v0.7.3.rst
+++ b/doc/source/whatsnew/v0.7.3.rst
@@ -23,7 +23,7 @@ New features
 .. code-block:: python
 
    from pandas.tools.plotting import scatter_matrix
-   scatter_matrix(df, alpha=0.2)        # noqa F821
+   scatter_matrix(df, alpha=0.2)
 
 
 - Add ``stacked`` argument to Series and DataFrame's ``plot`` method for
@@ -31,12 +31,12 @@ New features
 
 .. code-block:: python
 
-   df.plot(kind='bar', stacked=True)    # noqa F821
+   df.plot(kind='bar', stacked=True)
 
 
 .. code-block:: python
 
-   df.plot(kind='barh', stacked=True)   # noqa F821
+   df.plot(kind='barh', stacked=True)
 
 
 - Add log x and y :ref:`scaling options <visualization.basic>` to
diff --git a/pandas/_config/__init__.py b/pandas/_config/__init__.py
index 65936a9fcdbf3..7effbd7da9685 100644
--- a/pandas/_config/__init__.py
+++ b/pandas/_config/__init__.py
@@ -16,7 +16,7 @@
     "options",
 ]
 from pandas._config import config
-from pandas._config import dates  # noqa:F401
+from pandas._config import dates
 from pandas._config.config import (
     describe_option,
     get_option,
diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index 3f12ec4c15fc7..9670cdb0f0bd2 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -710,7 +710,7 @@ cdef class TextReader:
         # header is now a list of lists, so field_count should use header[0]
 
         cdef:
-            Py_ssize_t i, start, field_count, passed_count, unnamed_count  # noqa
+            Py_ssize_t i, start, field_count, passed_count, unnamed_count
             char *word
             object name, old_name
             int status
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index 0f1657480e4b3..5dd4d2b54e702 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -34,7 +34,7 @@ from pandas._libs.tslibs.conversion cimport (
     get_datetime64_nanos)
 
 # many modules still look for NaT and iNaT here despite them not being needed
-from pandas._libs.tslibs.nattype import nat_strings, iNaT  # noqa:F821
+from pandas._libs.tslibs.nattype import nat_strings, iNaT
 from pandas._libs.tslibs.nattype cimport (
     checknull_with_nat, NPY_NAT, c_NaT as NaT)
 
diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx
index 0bd4b78d51e4e..bd49af5e2d42f 100644
--- a/pandas/_libs/tslibs/nattype.pyx
+++ b/pandas/_libs/tslibs/nattype.pyx
@@ -389,7 +389,7 @@ class NaTType(_NaT):
     # nan methods
     weekday = _make_nan_func('weekday', datetime.weekday.__doc__)
     isoweekday = _make_nan_func('isoweekday', datetime.isoweekday.__doc__)
-    month_name = _make_nan_func('month_name',  # noqa:E128
+    month_name = _make_nan_func('month_name',
         """
         Return the month name of the Timestamp with specified locale.
 
@@ -404,7 +404,7 @@ class NaTType(_NaT):
 
         .. versionadded:: 0.23.0
         """)
-    day_name = _make_nan_func('day_name',  # noqa:E128
+    day_name = _make_nan_func('day_name',
         """
         Return the day name of the Timestamp with specified locale.
 
@@ -438,7 +438,7 @@ class NaTType(_NaT):
     # The remaining methods have docstrings copy/pasted from the analogous
     # Timestamp methods.
 
-    strptime = _make_error_func('strptime',  # noqa:E128
+    strptime = _make_error_func('strptime',
         """
         Timestamp.strptime(string, format)
 
@@ -446,28 +446,28 @@ class NaTType(_NaT):
         """
     )
 
-    utcfromtimestamp = _make_error_func('utcfromtimestamp',  # noqa:E128
+    utcfromtimestamp = _make_error_func('utcfromtimestamp',
         """
         Timestamp.utcfromtimestamp(ts)
 
         Construct a naive UTC datetime from a POSIX timestamp.
         """
     )
-    fromtimestamp = _make_error_func('fromtimestamp',  # noqa:E128
+    fromtimestamp = _make_error_func('fromtimestamp',
         """
         Timestamp.fromtimestamp(ts)
 
         timestamp[, tz] -> tz's local time from POSIX timestamp.
         """
     )
-    combine = _make_error_func('combine',  # noqa:E128
+    combine = _make_error_func('combine',
         """
         Timestamp.combine(date, time)
 
         date, time -> datetime with same date and time fields
         """
     )
-    utcnow = _make_error_func('utcnow',  # noqa:E128
+    utcnow = _make_error_func('utcnow',
         """
         Timestamp.utcnow()
 
@@ -475,12 +475,12 @@ class NaTType(_NaT):
         """
     )
 
-    timestamp = _make_error_func('timestamp',  # noqa:E128
+    timestamp = _make_error_func('timestamp',
         """Return POSIX timestamp as float.""")
 
     # GH9513 NaT methods (except to_datetime64) to raise, return np.nan, or
     # return NaT create functions that raise, for binding to NaTType
-    astimezone = _make_error_func('astimezone',  # noqa:E128
+    astimezone = _make_error_func('astimezone',
         """
         Convert tz-aware Timestamp to another time zone.
 
@@ -499,7 +499,7 @@ class NaTType(_NaT):
         TypeError
             If Timestamp is tz-naive.
         """)
-    fromordinal = _make_error_func('fromordinal',  # noqa:E128
+    fromordinal = _make_error_func('fromordinal',
         """
         Timestamp.fromordinal(ordinal, freq=None, tz=None)
 
@@ -517,14 +517,14 @@ class NaTType(_NaT):
         """)
 
     # _nat_methods
-    to_pydatetime = _make_nat_func('to_pydatetime',  # noqa:E128
+    to_pydatetime = _make_nat_func('to_pydatetime',
         """
         Convert a Timestamp object to a native Python datetime object.
 
         If warn=True, issue a warning if nanoseconds is nonzero.
         """)
 
-    now = _make_nat_func('now',  # noqa:E128
+    now = _make_nat_func('now',
         """
         Timestamp.now(tz=None)
 
@@ -536,7 +536,7 @@ class NaTType(_NaT):
         tz : str or timezone object, default None
             Timezone to localize to.
         """)
-    today = _make_nat_func('today',  # noqa:E128
+    today = _make_nat_func('today',
         """
         Timestamp.today(cls, tz=None)
 
@@ -549,7 +549,7 @@ class NaTType(_NaT):
         tz : str or timezone object, default None
             Timezone to localize to.
         """)
-    round = _make_nat_func('round',  # noqa:E128
+    round = _make_nat_func('round',
         """
         Round the Timestamp to the specified resolution.
 
@@ -590,7 +590,7 @@ timedelta}, default 'raise'
         ------
         ValueError if the freq cannot be converted
         """)
-    floor = _make_nat_func('floor',  # noqa:E128
+    floor = _make_nat_func('floor',
         """
         return a new Timestamp floored to this resolution.
 
@@ -627,7 +627,7 @@ timedelta}, default 'raise'
         ------
         ValueError if the freq cannot be converted.
         """)
-    ceil = _make_nat_func('ceil',  # noqa:E128
+    ceil = _make_nat_func('ceil',
         """
         return a new Timestamp ceiled to this resolution.
 
@@ -665,7 +665,7 @@ timedelta}, default 'raise'
         ValueError if the freq cannot be converted.
         """)
 
-    tz_convert = _make_nat_func('tz_convert',  # noqa:E128
+    tz_convert = _make_nat_func('tz_convert',
         """
         Convert tz-aware Timestamp to another time zone.
 
@@ -684,7 +684,7 @@ timedelta}, default 'raise'
         TypeError
             If Timestamp is tz-naive.
         """)
-    tz_localize = _make_nat_func('tz_localize',  # noqa:E128
+    tz_localize = _make_nat_func('tz_localize',
         """
         Convert naive Timestamp to local time zone, or remove
         timezone from tz-aware Timestamp.
@@ -749,7 +749,7 @@ default 'raise'
         TypeError
             If the Timestamp is tz-aware and tz is not None.
         """)
-    replace = _make_nat_func('replace',  # noqa:E128
+    replace = _make_nat_func('replace',
         """
         implements datetime.replace, handles nanoseconds.
 
diff --git a/pandas/_typing.py b/pandas/_typing.py
index 445eff9e19e47..2dc5a9e671d34 100644
--- a/pandas/_typing.py
+++ b/pandas/_typing.py
@@ -17,12 +17,12 @@
 # and use a string literal forward reference to it in subsequent types
 # https://mypy.readthedocs.io/en/latest/common_issues.html#import-cycles
 if TYPE_CHECKING:
-    from pandas._libs import Period, Timedelta, Timestamp  # noqa: F401
-    from pandas.core.arrays.base import ExtensionArray  # noqa: F401
-    from pandas.core.dtypes.dtypes import ExtensionDtype  # noqa: F401
-    from pandas.core.indexes.base import Index  # noqa: F401
-    from pandas.core.series import Series  # noqa: F401
-    from pandas.core.generic import NDFrame  # noqa: F401
+    from pandas._libs import Period, Timedelta, Timestamp
+    from pandas.core.arrays.base import ExtensionArray
+    from pandas.core.dtypes.dtypes import ExtensionDtype
+    from pandas.core.indexes.base import Index
+    from pandas.core.series import Series
+    from pandas.core.generic import NDFrame
 
 
 AnyArrayLike = TypeVar("AnyArrayLike", "ExtensionArray", "Index", "Series", np.ndarray)
diff --git a/pandas/api/__init__.py b/pandas/api/__init__.py
index 58422811990c4..c8666327250b5 100644
--- a/pandas/api/__init__.py
+++ b/pandas/api/__init__.py
@@ -1,2 +1,2 @@
 """ public toolkit API """
-from . import extensions, types  # noqa
+from . import extensions, types
diff --git a/pandas/api/extensions/__init__.py b/pandas/api/extensions/__init__.py
index 573d700dac43d..f7da81febaca8 100644
--- a/pandas/api/extensions/__init__.py
+++ b/pandas/api/extensions/__init__.py
@@ -1,13 +1,13 @@
 """Public API for extending pandas objects."""
-from pandas.core.dtypes.dtypes import (  # noqa: F401
+from pandas.core.dtypes.dtypes import (
     ExtensionDtype,
     register_extension_dtype,
 )
 
-from pandas.core.accessor import (  # noqa: F401
+from pandas.core.accessor import (
     register_dataframe_accessor,
     register_index_accessor,
     register_series_accessor,
 )
-from pandas.core.algorithms import take  # noqa: F401
-from pandas.core.arrays import ExtensionArray, ExtensionScalarOpsMixin  # noqa: F401
+from pandas.core.algorithms import take
+from pandas.core.arrays import ExtensionArray, ExtensionScalarOpsMixin
diff --git a/pandas/api/types/__init__.py b/pandas/api/types/__init__.py
index f32e1abe28cc1..1c40a0d31b662 100644
--- a/pandas/api/types/__init__.py
+++ b/pandas/api/types/__init__.py
@@ -1,10 +1,10 @@
 """ public toolkit API """
 
-from pandas._libs.lib import infer_dtype  # noqa: F401
+from pandas._libs.lib import infer_dtype
 
-from pandas.core.dtypes.api import *  # noqa: F403, F401
-from pandas.core.dtypes.concat import union_categoricals  # noqa: F401
-from pandas.core.dtypes.dtypes import (  # noqa: F401
+from pandas.core.dtypes.api import *
+from pandas.core.dtypes.concat import union_categoricals
+from pandas.core.dtypes.dtypes import (
     CategoricalDtype,
     DatetimeTZDtype,
     IntervalDtype,
diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py
index 3a36713ccdbda..acb1cd4d4724e 100644
--- a/pandas/compat/pickle_compat.py
+++ b/pandas/compat/pickle_compat.py
@@ -20,7 +20,7 @@ def load_reduce(self):
     func = stack[-1]
 
     if len(args) and type(args[0]) is type:
-        n = args[0].__name__  # noqa
+        n = args[0].__name__
 
     try:
         stack[-1] = func(*args)
diff --git a/pandas/core/accessor.py b/pandas/core/accessor.py
index fc60c01d7b808..8b6374945ef90 100644
--- a/pandas/core/accessor.py
+++ b/pandas/core/accessor.py
@@ -221,7 +221,7 @@ def decorator(accessor):
 
 .. code-block:: python
 
-    def __init__(self, pandas_object):  # noqa: E999
+    def __init__(self, pandas_object):
         ...
 
 For consistency with pandas methods, you should raise an ``AttributeError``
diff --git a/pandas/core/arrays/__init__.py b/pandas/core/arrays/__init__.py
index 868118bac6a7b..786703429b5bf 100644
--- a/pandas/core/arrays/__init__.py
+++ b/pandas/core/arrays/__init__.py
@@ -1,14 +1,14 @@
-from .base import (  # noqa: F401
+from .base import (
     ExtensionArray,
     ExtensionOpsMixin,
     ExtensionScalarOpsMixin,
 )
-from .categorical import Categorical  # noqa: F401
-from .datetimes import DatetimeArray  # noqa: F401
-from .integer import IntegerArray, integer_array  # noqa: F401
-from .interval import IntervalArray  # noqa: F401
-from .numpy_ import PandasArray, PandasDtype  # noqa: F401
-from .period import PeriodArray, period_array  # noqa: F401
-from .sparse import SparseArray  # noqa: F401
-from .string_ import StringArray  # noqa: F401
-from .timedeltas import TimedeltaArray  # noqa: F401
+from .categorical import Categorical
+from .datetimes import DatetimeArray
+from .integer import IntegerArray, integer_array
+from .interval import IntervalArray
+from .numpy_ import PandasArray, PandasDtype
+from .period import PeriodArray, period_array
+from .sparse import SparseArray
+from .string_ import StringArray
+from .timedeltas import TimedeltaArray
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 565f5076fdddb..a9451d8110be1 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -25,7 +25,7 @@
 )
 from pandas.core.dtypes.generic import ABCIndex, ABCIndexClass, ABCSeries
 from pandas.core.dtypes.inference import _iterable_not_string
-from pandas.core.dtypes.missing import isna, isnull, notnull  # noqa
+from pandas.core.dtypes.missing import isna, isnull, notnull
 
 
 class SettingWithCopyError(ValueError):
diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py
index 81658ab23ba46..55c8359cdbbd7 100644
--- a/pandas/core/computation/pytables.py
+++ b/pandas/core/computation/pytables.py
@@ -529,7 +529,7 @@ def __init__(self, where, queryables=None, encoding=None, scope_level=0):
                 else:
                     w = _validate_where(w)
                     where[idx] = w
-            where = " & ".join(map("({})".format, com.flatten(where)))  # noqa
+            where = " & ".join(map("({})".format, com.flatten(where)))
 
         self.expr = where
         self.env = Scope(scope_level + 1, local_dict=local_dict)
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index 3f4ebc88c1c8a..8eef5113cfcc4 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -25,7 +25,7 @@
     ABCPeriodIndex,
     ABCSeries,
 )
-from pandas.core.dtypes.inference import (  # noqa:F401
+from pandas.core.dtypes.inference import (
     is_array_like,
     is_bool,
     is_complex,
diff --git a/pandas/core/groupby/__init__.py b/pandas/core/groupby/__init__.py
index 252f20ed40068..c6e1875d94c0e 100644
--- a/pandas/core/groupby/__init__.py
+++ b/pandas/core/groupby/__init__.py
@@ -1,7 +1,7 @@
-from pandas.core.groupby.generic import (  # noqa: F401
+from pandas.core.groupby.generic import (
     DataFrameGroupBy,
     NamedAgg,
     SeriesGroupBy,
 )
-from pandas.core.groupby.groupby import GroupBy  # noqa: F401
-from pandas.core.groupby.grouper import Grouper  # noqa: F401
+from pandas.core.groupby.groupby import GroupBy
+from pandas.core.groupby.grouper import Grouper
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 695823e29ef1b..0fd55d2bd7752 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -480,7 +480,7 @@ def _transform_fast(self, func, func_nm):
             out = self._try_cast(out, self.obj)
         return Series(out, index=self.obj.index, name=self.obj.name)
 
-    def filter(self, func, dropna=True, *args, **kwargs):  # noqa
+    def filter(self, func, dropna=True, *args, **kwargs):
         """
         Return a copy of a Series excluding elements from groups that
         do not satisfy the boolean criterion specified by func.
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py
index d7eaaca5ac83a..39d4c3c8d1600 100644
--- a/pandas/core/groupby/grouper.py
+++ b/pandas/core/groupby/grouper.py
@@ -284,7 +284,7 @@ def __init__(
             if self.name is None:
                 self.name = index.names[level]
 
-            self.grouper, self._labels, self._group_index = index._get_grouper_for_level(  # noqa: E501
+            self.grouper, self._labels, self._group_index = index._get_grouper_for_level(
                 self.grouper, level
             )
 
diff --git a/pandas/core/index.py b/pandas/core/index.py
index d308ac1a9b1c7..7469a663e3e84 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -1,4 +1,4 @@
-from pandas.core.indexes.api import (  # noqa:F401
+from pandas.core.indexes.api import (
     CategoricalIndex,
     DatetimeIndex,
     Float64Index,
@@ -22,4 +22,4 @@
     ensure_index,
     ensure_index_from_sequences,
 )
-from pandas.core.indexes.multi import _sparsify  # noqa:F401
+from pandas.core.indexes.multi import _sparsify
diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py
index cc8ecc0e64684..fa17035ef3ddb 100644
--- a/pandas/core/indexes/accessors.py
+++ b/pandas/core/indexes/accessors.py
@@ -277,7 +277,7 @@ def components(self):
         2     0      0        0        2             0             0            0
         3     0      0        0        3             0             0            0
         4     0      0        0        4             0             0            0
-        """  # noqa: E501
+        """
         return self._get_values().components.set_index(self._parent.index)
 
     @property
diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py
index 86d55ce2e7cc3..5fa9fc96c2f32 100644
--- a/pandas/core/indexes/api.py
+++ b/pandas/core/indexes/api.py
@@ -10,19 +10,19 @@
     ensure_index,
     ensure_index_from_sequences,
 )
-from pandas.core.indexes.base import InvalidIndexError  # noqa:F401
-from pandas.core.indexes.category import CategoricalIndex  # noqa:F401
+from pandas.core.indexes.base import InvalidIndexError
+from pandas.core.indexes.category import CategoricalIndex
 from pandas.core.indexes.datetimes import DatetimeIndex
-from pandas.core.indexes.interval import IntervalIndex  # noqa:F401
-from pandas.core.indexes.multi import MultiIndex  # noqa:F401
-from pandas.core.indexes.numeric import (  # noqa:F401
+from pandas.core.indexes.interval import IntervalIndex
+from pandas.core.indexes.multi import MultiIndex
+from pandas.core.indexes.numeric import (
     Float64Index,
     Int64Index,
     NumericIndex,
     UInt64Index,
 )
 from pandas.core.indexes.period import PeriodIndex
-from pandas.core.indexes.range import RangeIndex  # noqa:F401
+from pandas.core.indexes.range import RangeIndex
 from pandas.core.indexes.timedeltas import TimedeltaIndex
 
 _sort_msg = textwrap.dedent(
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index e5a8edb56e413..b0120e48c394e 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -128,20 +128,20 @@ class CategoricalIndex(Index, accessor.PandasDelegate):
     Examples
     --------
     >>> pd.CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'])
-    CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'], categories=['a', 'b', 'c'], ordered=False, dtype='category')  # noqa
+    CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'], categories=['a', 'b', 'c'], ordered=False, dtype='category')
 
     ``CategoricalIndex`` can also be instantiated from a ``Categorical``:
 
     >>> c = pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
     >>> pd.CategoricalIndex(c)
-    CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'], categories=['a', 'b', 'c'], ordered=False, dtype='category')  # noqa
+    CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'], categories=['a', 'b', 'c'], ordered=False, dtype='category')
 
     Ordered ``CategoricalIndex`` can have a min and max value.
 
     >>> ci = pd.CategoricalIndex(['a','b','c','a','b','c'], ordered=True,
     ...                          categories=['c', 'b', 'a'])
     >>> ci
-    CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'], categories=['c', 'b', 'a'], ordered=True, dtype='category')  # noqa
+    CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'], categories=['c', 'b', 'a'], ordered=True, dtype='category')
     >>> ci.min()
     'c'
     """
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 45cb037600fd7..0dc8558bb3bcb 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -1190,7 +1190,7 @@ def _validate_read_indexer(
             KeyError in the future, you can use .reindex() as an alternative.
 
             See the documentation here:
-            https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#deprecate-loc-reindex-listlike"""  # noqa: E501
+            https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#deprecate-loc-reindex-listlike"""
             )
 
             if not (ax.is_categorical() or ax.is_interval()):
@@ -1320,7 +1320,7 @@ class _IXIndexer(_NDFrameIndexer):
         .iloc for positional indexing
 
         See the documentation here:
-        http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#ix-indexer-is-deprecated"""  # noqa: E501
+        http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#ix-indexer-is-deprecated"""
     )
 
     def __init__(self, name, obj):
diff --git a/pandas/core/internals/__init__.py b/pandas/core/internals/__init__.py
index 8ac0df2fa4e0a..234a6ba73356f 100644
--- a/pandas/core/internals/__init__.py
+++ b/pandas/core/internals/__init__.py
@@ -1,4 +1,4 @@
-from .blocks import (  # noqa: F401
+from .blocks import (
     Block,
     BoolBlock,
     CategoricalBlock,
@@ -11,18 +11,18 @@
     ObjectBlock,
     TimeDeltaBlock,
 )
-from .managers import (  # noqa: F401
+from .managers import (
     BlockManager,
     SingleBlockManager,
     create_block_manager_from_arrays,
     create_block_manager_from_blocks,
 )
 
-from .blocks import _safe_reshape  # noqa: F401; io.packers
-from .blocks import make_block  # noqa: F401; io.pytables, io.packers
-from .managers import (  # noqa: F401; reshape.concat, reshape.merge
+from .blocks import _safe_reshape  # io.packers
+from .blocks import make_block  # io.pytables, io.packers
+from .managers import (  # reshape.concat, reshape.merge
     _transform_index,
     concatenate_block_managers,
 )
 
-from .blocks import _block_shape  # noqa:F401; io.pytables
+from .blocks import _block_shape  # io.pytables
diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py
index 36e1b06230d7e..373781f0c0413 100644
--- a/pandas/core/internals/concat.py
+++ b/pandas/core/internals/concat.py
@@ -385,7 +385,7 @@ def is_uniform_join_units(join_units):
     return (
         # all blocks need to have the same type
         all(type(ju.block) is type(join_units[0].block) for ju in join_units)
-        and  # noqa
+        and
         # no blocks that would get missing values (can lead to type upcasts)
         # unless we're an extension dtype.
         all(not ju.is_na or ju.block.is_extension for ju in join_units)
diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py
index 398fa9b0c1fc0..706b679e70da0 100644
--- a/pandas/core/ops/__init__.py
+++ b/pandas/core/ops/__init__.py
@@ -28,8 +28,8 @@
     define_na_arithmetic_op,
     logical_op,
 )
-from pandas.core.ops.array_ops import comp_method_OBJECT_ARRAY  # noqa:F401
-from pandas.core.ops.dispatch import maybe_dispatch_ufunc_to_dunder_op  # noqa:F401
+from pandas.core.ops.array_ops import comp_method_OBJECT_ARRAY
+from pandas.core.ops.dispatch import maybe_dispatch_ufunc_to_dunder_op
 from pandas.core.ops.dispatch import should_series_dispatch
 from pandas.core.ops.docstrings import (
     _arith_doc_FRAME,
@@ -37,12 +37,12 @@
     _make_flex_doc,
     _op_descriptions,
 )
-from pandas.core.ops.invalid import invalid_comparison  # noqa:F401
-from pandas.core.ops.methods import (  # noqa:F401
+from pandas.core.ops.invalid import invalid_comparison
+from pandas.core.ops.methods import (
     add_flex_arithmetic_methods,
     add_special_arithmetic_methods,
 )
-from pandas.core.ops.roperator import (  # noqa:F401
+from pandas.core.ops.roperator import (
     radd,
     rand_,
     rdiv,
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index 70143e4603a4b..ff5a9f3002c9a 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -7,7 +7,7 @@
 
 from pandas._libs import tslib, tslibs
 from pandas._libs.tslibs import Timestamp, conversion, parsing
-from pandas._libs.tslibs.parsing import (  # noqa
+from pandas._libs.tslibs.parsing import (
     DateParseError,
     _format_is_iso,
     _guess_datetime_format,
diff --git a/pandas/core/util/hashing.py b/pandas/core/util/hashing.py
index fddbea8ed0d7a..277ab09dcf22f 100644
--- a/pandas/core/util/hashing.py
+++ b/pandas/core/util/hashing.py
@@ -124,7 +124,7 @@ def hash_pandas_object(
                     encoding=encoding,
                     hash_key=hash_key,
                     categorize=categorize,
-                ).values  # noqa
+                ).values
                 for _ in [None]
             )
             num_items += 1
diff --git a/pandas/core/window/__init__.py b/pandas/core/window/__init__.py
index dcf58a4c0dd5b..81a1b4ff58860 100644
--- a/pandas/core/window/__init__.py
+++ b/pandas/core/window/__init__.py
@@ -1,3 +1,3 @@
-from pandas.core.window.ewm import EWM  # noqa:F401
-from pandas.core.window.expanding import Expanding, ExpandingGroupby  # noqa:F401
-from pandas.core.window.rolling import Rolling, RollingGroupby, Window  # noqa:F401
+from pandas.core.window.ewm import EWM
+from pandas.core.window.expanding import Expanding, ExpandingGroupby
+from pandas.core.window.rolling import Rolling, RollingGroupby, Window
diff --git a/pandas/core/window/common.py b/pandas/core/window/common.py
index 0f2920b3558c9..48dbd4d3b8266 100644
--- a/pandas/core/window/common.py
+++ b/pandas/core/window/common.py
@@ -32,7 +32,7 @@ class _GroupByMixin(GroupByMixin):
     """
 
     def __init__(self, obj, *args, **kwargs):
-        parent = kwargs.pop("parent", None)  # noqa
+        parent = kwargs.pop("parent", None)
         groupby = kwargs.pop("groupby", None)
         if groupby is None:
             groupby, obj = obj, obj.obj
diff --git a/pandas/io/clipboard/__init__.py b/pandas/io/clipboard/__init__.py
index caa928731fb3a..3d6cd7c8dc15b 100644
--- a/pandas/io/clipboard/__init__.py
+++ b/pandas/io/clipboard/__init__.py
@@ -72,14 +72,14 @@ def determine_clipboard():
             # qtpy is a small abstraction layer that lets you write
             # applications using a single api call to either PyQt or PySide
             # https://pypi.org/project/QtPy
-            import qtpy  # noqa
+            import qtpy
         except ImportError:
             # If qtpy isn't installed, fall back on importing PyQt5, or PyQt5
             try:
-                import PyQt5  # noqa
+                import PyQt5
             except ImportError:
                 try:
-                    import PyQt4  # noqa
+                    import PyQt4
                 except ImportError:
                     pass  # fail fast for all non-ImportError exceptions.
                 else:
diff --git a/pandas/io/common.py b/pandas/io/common.py
index 0b8594bbbd3e4..861de9184d7aa 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -22,7 +22,7 @@
     Type,
     Union,
 )
-from urllib.parse import (  # noqa
+from urllib.parse import (
     urlencode,
     urljoin,
     urlparse as parse_url,
@@ -33,7 +33,7 @@
 import zipfile
 
 from pandas.compat import _get_lzma_file, _import_lzma
-from pandas.errors import (  # noqa
+from pandas.errors import (
     AbstractMethodError,
     DtypeWarning,
     EmptyDataError,
diff --git a/pandas/io/formats/console.py b/pandas/io/formats/console.py
index 7f8f2fbea2352..d823e9311c27c 100644
--- a/pandas/io/formats/console.py
+++ b/pandas/io/formats/console.py
@@ -65,7 +65,7 @@ def check_main():
         return not hasattr(main, "__file__") or get_option("mode.sim_interactive")
 
     try:
-        return __IPYTHON__ or check_main()  # noqa
+        return __IPYTHON__ or check_main()
     except NameError:
         return check_main()
 
@@ -75,7 +75,7 @@ def in_ipython_frontend():
     check if we're inside an an IPython zmq frontend
     """
     try:
-        ip = get_ipython()  # noqa
+        ip = get_ipython()
         return "zmq" in str(type(ip)).lower()
     except NameError:
         pass
diff --git a/pandas/io/html.py b/pandas/io/html.py
index 490c574463b9b..f5d20c79725ad 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -843,7 +843,7 @@ def _parser_dispatch(flavor):
         if not _HAS_BS4:
             raise ImportError("BeautifulSoup4 (bs4) not found, please install it")
         # Although we call this above, we want to raise here right before use.
-        bs4 = import_optional_dependency("bs4")  # noqa:F841
+        bs4 = import_optional_dependency("bs4")
 
     else:
         if not _HAS_LXML:
diff --git a/pandas/io/msgpack/__init__.py b/pandas/io/msgpack/__init__.py
index 7107263c180cb..1aa724799de33 100644
--- a/pandas/io/msgpack/__init__.py
+++ b/pandas/io/msgpack/__init__.py
@@ -2,8 +2,8 @@
 
 from collections import namedtuple
 
-from pandas.io.msgpack.exceptions import *  # noqa: F401,F403 isort:skip
-from pandas.io.msgpack._version import version  # noqa: F401 isort:skip
+from pandas.io.msgpack.exceptions import *
+from pandas.io.msgpack._version import version
 
 
 class ExtType(namedtuple("ExtType", "code data")):
@@ -19,14 +19,14 @@ def __new__(cls, code, data):
         return super().__new__(cls, code, data)
 
 
-import os  # noqa: F401,E402 isort:skip
+import os
 
-from pandas.io.msgpack._unpacker import (  # noqa: F401,E402 isort:skip
+from pandas.io.msgpack._unpacker import (
     Unpacker,
     unpack,
     unpackb,
 )
-from pandas.io.msgpack._packer import Packer  # noqa: E402 isort:skip
+from pandas.io.msgpack._packer import Packer
 
 
 def pack(o, stream, **kwargs):
diff --git a/pandas/io/packers.py b/pandas/io/packers.py
index c0ace7996e1b9..a138bcbbe7bda 100644
--- a/pandas/io/packers.py
+++ b/pandas/io/packers.py
@@ -61,7 +61,7 @@
     pandas_dtype,
 )
 
-from pandas import (  # noqa:F401
+from pandas import (
     Categorical,
     CategoricalIndex,
     DataFrame,
@@ -132,7 +132,7 @@ def to_msgpack(path_or_buf, *args, **kwargs):
         "It is recommended to use pyarrow for on-the-wire "
         "transmission of pandas objects.\n"
         "For a full example, check\n"
-        "https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_msgpack.html",  # noqa: E501
+        "https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_msgpack.html",
         FutureWarning,
         stacklevel=3,
     )
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index c82486532530f..31365794b4f1d 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -1918,7 +1918,7 @@ def __init__(self, src, **kwds):
         else:
             if len(self._reader.header) > 1:
                 # we have a multi index in the columns
-                self.names, self.index_names, self.col_names, passed_names = self._extract_multi_indexer_columns(  # noqa: E501
+                self.names, self.index_names, self.col_names, passed_names = self._extract_multi_indexer_columns(
                     self._reader.header, self.index_names, self.col_names, passed_names
                 )
             else:
@@ -2307,7 +2307,7 @@ def __init__(self, f, **kwds):
         # The original set is stored in self.original_columns.
         if len(self.columns) > 1:
             # we are processing a multi index column
-            self.columns, self.index_names, self.col_names, _ = self._extract_multi_indexer_columns(  # noqa: E501
+            self.columns, self.index_names, self.col_names, _ = self._extract_multi_indexer_columns(
                 self.columns, self.index_names, self.col_names
             )
             # Update list of original names to include all indices.
diff --git a/pandas/io/sas/__init__.py b/pandas/io/sas/__init__.py
index fa6b29a1a3fcc..fccd26a20dad6 100644
--- a/pandas/io/sas/__init__.py
+++ b/pandas/io/sas/__init__.py
@@ -1 +1 @@
-from .sasreader import read_sas  # noqa
+from .sasreader import read_sas
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 822b3288c82d9..8af895ce1fce4 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -48,7 +48,7 @@ def _is_sqlalchemy_connectable(con):
             _SQLALCHEMY_INSTALLED = False
 
     if _SQLALCHEMY_INSTALLED:
-        import sqlalchemy  # noqa: F811
+        import sqlalchemy
 
         return isinstance(con, sqlalchemy.engine.Connectable)
     else:
diff --git a/pandas/tests/arrays/categorical/test_constructors.py b/pandas/tests/arrays/categorical/test_constructors.py
index 237ec17f56974..f24366ffcb9a6 100644
--- a/pandas/tests/arrays/categorical/test_constructors.py
+++ b/pandas/tests/arrays/categorical/test_constructors.py
@@ -214,19 +214,19 @@ def test_constructor(self):
         #  - when the first is an integer dtype and the second is not
         #  - when the resulting codes are all -1/NaN
         with tm.assert_produces_warning(None):
-            c_old = Categorical([0, 1, 2, 0, 1, 2], categories=["a", "b", "c"])  # noqa
+            c_old = Categorical([0, 1, 2, 0, 1, 2], categories=["a", "b", "c"])
 
         with tm.assert_produces_warning(None):
-            c_old = Categorical([0, 1, 2, 0, 1, 2], categories=[3, 4, 5])  # noqa
+            c_old = Categorical([0, 1, 2, 0, 1, 2], categories=[3, 4, 5])
 
         # the next one are from the old docs
         with tm.assert_produces_warning(None):
-            c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3])  # noqa
+            c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3])
             cat = Categorical([1, 2], categories=[1, 2, 3])
 
         # this is a legitimate constructor
         with tm.assert_produces_warning(None):
-            c = Categorical(  # noqa
+            c = Categorical(
                 np.array([], dtype="int64"), categories=[3, 2, 1], ordered=True
             )
 
diff --git a/pandas/tests/arrays/categorical/test_repr.py b/pandas/tests/arrays/categorical/test_repr.py
index 9321813b42b33..ad2df30766fae 100644
--- a/pandas/tests/arrays/categorical/test_repr.py
+++ b/pandas/tests/arrays/categorical/test_repr.py
@@ -74,7 +74,7 @@ def test_unicode_print(self):
         expected = """\
 [ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
 Length: 60
-Categories (3, object): [ああああ, いいいいい, ううううううう]"""  # noqa
+Categories (3, object): [ああああ, いいいいい, ううううううう]"""
 
         assert repr(c) == expected
 
@@ -85,7 +85,7 @@ def test_unicode_print(self):
             c = Categorical(["ああああ", "いいいいい", "ううううううう"] * 20)
             expected = """[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
 Length: 60
-Categories (3, object): [ああああ, いいいいい, ううううううう]"""  # noqa
+Categories (3, object): [ああああ, いいいいい, ううううううう]"""
 
             assert repr(c) == expected
 
@@ -212,14 +212,14 @@ def test_categorical_repr_datetime_ordered(self):
         c = Categorical(idx, ordered=True)
         exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
 Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
-                                 2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""  # noqa
+                                 2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""
 
         assert repr(c) == exp
 
         c = Categorical(idx.append(idx), categories=idx, ordered=True)
         exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
 Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
-                                 2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""  # noqa
+                                 2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""
 
         assert repr(c) == exp
 
@@ -228,7 +228,7 @@ def test_categorical_repr_datetime_ordered(self):
         exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
 Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
                                              2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
-                                             2011-01-01 13:00:00-05:00]"""  # noqa
+                                             2011-01-01 13:00:00-05:00]"""
 
         assert repr(c) == exp
 
@@ -236,7 +236,7 @@ def test_categorical_repr_datetime_ordered(self):
         exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
 Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
                                              2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
-                                             2011-01-01 13:00:00-05:00]"""  # noqa
+                                             2011-01-01 13:00:00-05:00]"""
 
         assert repr(c) == exp
 
@@ -256,14 +256,14 @@ def test_categorical_repr_period(self):
         c = Categorical(idx)
         exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
 Categories (5, period[H]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
-                            2011-01-01 13:00]"""  # noqa
+                            2011-01-01 13:00]"""
 
         assert repr(c) == exp
 
         c = Categorical(idx.append(idx), categories=idx)
         exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
 Categories (5, period[H]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
-                            2011-01-01 13:00]"""  # noqa
+                            2011-01-01 13:00]"""
 
         assert repr(c) == exp
 
@@ -276,7 +276,7 @@ def test_categorical_repr_period(self):
 
         c = Categorical(idx.append(idx), categories=idx)
         exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
-Categories (5, period[M]): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""  # noqa
+Categories (5, period[M]): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
 
         assert repr(c) == exp
 
@@ -285,14 +285,14 @@ def test_categorical_repr_period_ordered(self):
         c = Categorical(idx, ordered=True)
         exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
 Categories (5, period[H]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
-                            2011-01-01 13:00]"""  # noqa
+                            2011-01-01 13:00]"""
 
         assert repr(c) == exp
 
         c = Categorical(idx.append(idx), categories=idx, ordered=True)
         exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
 Categories (5, period[H]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
-                            2011-01-01 13:00]"""  # noqa
+                            2011-01-01 13:00]"""
 
         assert repr(c) == exp
 
@@ -305,7 +305,7 @@ def test_categorical_repr_period_ordered(self):
 
         c = Categorical(idx.append(idx), categories=idx, ordered=True)
         exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
-Categories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""  # noqa
+Categories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
 
         assert repr(c) == exp
 
@@ -319,7 +319,7 @@ def test_categorical_repr_timedelta(self):
 
         c = Categorical(idx.append(idx), categories=idx)
         exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
-Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""  # noqa
+Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
 
         assert repr(c) == exp
 
@@ -329,7 +329,7 @@ def test_categorical_repr_timedelta(self):
 Length: 20
 Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
                                    3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
-                                   18 days 01:00:00, 19 days 01:00:00]"""  # noqa
+                                   18 days 01:00:00, 19 days 01:00:00]"""
 
         assert repr(c) == exp
 
@@ -338,7 +338,7 @@ def test_categorical_repr_timedelta(self):
 Length: 40
 Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
                                    3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
-                                   18 days 01:00:00, 19 days 01:00:00]"""  # noqa
+                                   18 days 01:00:00, 19 days 01:00:00]"""
 
         assert repr(c) == exp
 
@@ -346,13 +346,13 @@ def test_categorical_repr_timedelta_ordered(self):
         idx = timedelta_range("1 days", periods=5)
         c = Categorical(idx, ordered=True)
         exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
-Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""  # noqa
+Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
 
         assert repr(c) == exp
 
         c = Categorical(idx.append(idx), categories=idx, ordered=True)
         exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
-Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""  # noqa
+Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
 
         assert repr(c) == exp
 
@@ -362,7 +362,7 @@ def test_categorical_repr_timedelta_ordered(self):
 Length: 20
 Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
                                    3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
-                                   18 days 01:00:00 < 19 days 01:00:00]"""  # noqa
+                                   18 days 01:00:00 < 19 days 01:00:00]"""
 
         assert repr(c) == exp
 
@@ -371,26 +371,26 @@ def test_categorical_repr_timedelta_ordered(self):
 Length: 40
 Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
                                    3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
-                                   18 days 01:00:00 < 19 days 01:00:00]"""  # noqa
+                                   18 days 01:00:00 < 19 days 01:00:00]"""
 
         assert repr(c) == exp
 
     def test_categorical_index_repr(self):
         idx = CategoricalIndex(Categorical([1, 2, 3]))
-        exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')"""  # noqa
+        exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')"""
         assert repr(idx) == exp
 
         i = CategoricalIndex(Categorical(np.arange(10)))
-        exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=False, dtype='category')"""  # noqa
+        exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=False, dtype='category')"""
         assert repr(i) == exp
 
     def test_categorical_index_repr_ordered(self):
         i = CategoricalIndex(Categorical([1, 2, 3], ordered=True))
-        exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')"""  # noqa
+        exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')"""
         assert repr(i) == exp
 
         i = CategoricalIndex(Categorical(np.arange(10), ordered=True))
-        exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=True, dtype='category')"""  # noqa
+        exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=True, dtype='category')"""
         assert repr(i) == exp
 
     def test_categorical_index_repr_datetime(self):
@@ -399,7 +399,7 @@ def test_categorical_index_repr_datetime(self):
         exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
                   '2011-01-01 11:00:00', '2011-01-01 12:00:00',
                   '2011-01-01 13:00:00'],
-                 categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')"""  # noqa
+                 categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')"""
 
         assert repr(i) == exp
 
@@ -408,7 +408,7 @@ def test_categorical_index_repr_datetime(self):
         exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
                   '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
                   '2011-01-01 13:00:00-05:00'],
-                 categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')"""  # noqa
+                 categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')"""
 
         assert repr(i) == exp
 
@@ -418,7 +418,7 @@ def test_categorical_index_repr_datetime_ordered(self):
         exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
                   '2011-01-01 11:00:00', '2011-01-01 12:00:00',
                   '2011-01-01 13:00:00'],
-                 categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')"""  # noqa
+                 categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')"""
 
         assert repr(i) == exp
 
@@ -427,7 +427,7 @@ def test_categorical_index_repr_datetime_ordered(self):
         exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
                   '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
                   '2011-01-01 13:00:00-05:00'],
-                 categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""  # noqa
+                 categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
 
         assert repr(i) == exp
 
@@ -437,7 +437,7 @@ def test_categorical_index_repr_datetime_ordered(self):
                   '2011-01-01 13:00:00-05:00', '2011-01-01 09:00:00-05:00',
                   '2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00',
                   '2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'],
-                 categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""  # noqa
+                 categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
 
         assert repr(i) == exp
 
@@ -445,24 +445,24 @@ def test_categorical_index_repr_period(self):
         # test all length
         idx = period_range("2011-01-01 09:00", freq="H", periods=1)
         i = CategoricalIndex(Categorical(idx))
-        exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')"""  # noqa
+        exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')"""
         assert repr(i) == exp
 
         idx = period_range("2011-01-01 09:00", freq="H", periods=2)
         i = CategoricalIndex(Categorical(idx))
-        exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')"""  # noqa
+        exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')"""
         assert repr(i) == exp
 
         idx = period_range("2011-01-01 09:00", freq="H", periods=3)
         i = CategoricalIndex(Categorical(idx))
-        exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')"""  # noqa
+        exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')"""
         assert repr(i) == exp
 
         idx = period_range("2011-01-01 09:00", freq="H", periods=5)
         i = CategoricalIndex(Categorical(idx))
         exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
                   '2011-01-01 12:00', '2011-01-01 13:00'],
-                 categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""  # noqa
+                 categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
 
         assert repr(i) == exp
 
@@ -471,13 +471,13 @@ def test_categorical_index_repr_period(self):
                   '2011-01-01 12:00', '2011-01-01 13:00', '2011-01-01 09:00',
                   '2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00',
                   '2011-01-01 13:00'],
-                 categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""  # noqa
+                 categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
 
         assert repr(i) == exp
 
         idx = period_range("2011-01", freq="M", periods=5)
         i = CategoricalIndex(Categorical(idx))
-        exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')"""  # noqa
+        exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')"""
         assert repr(i) == exp
 
     def test_categorical_index_repr_period_ordered(self):
@@ -485,19 +485,19 @@ def test_categorical_index_repr_period_ordered(self):
         i = CategoricalIndex(Categorical(idx, ordered=True))
         exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
                   '2011-01-01 12:00', '2011-01-01 13:00'],
-                 categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')"""  # noqa
+                 categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')"""
 
         assert repr(i) == exp
 
         idx = period_range("2011-01", freq="M", periods=5)
         i = CategoricalIndex(Categorical(idx, ordered=True))
-        exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')"""  # noqa
+        exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')"""
         assert repr(i) == exp
 
     def test_categorical_index_repr_timedelta(self):
         idx = timedelta_range("1 days", periods=5)
         i = CategoricalIndex(Categorical(idx))
-        exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=False, dtype='category')"""  # noqa
+        exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=False, dtype='category')"""
         assert repr(i) == exp
 
         idx = timedelta_range("1 hours", periods=10)
@@ -506,14 +506,14 @@ def test_categorical_index_repr_timedelta(self):
                   '3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
                   '6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
                   '9 days 01:00:00'],
-                 categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=False, dtype='category')"""  # noqa
+                 categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=False, dtype='category')"""
 
         assert repr(i) == exp
 
     def test_categorical_index_repr_timedelta_ordered(self):
         idx = timedelta_range("1 days", periods=5)
         i = CategoricalIndex(Categorical(idx, ordered=True))
-        exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=True, dtype='category')"""  # noqa
+        exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=True, dtype='category')"""
         assert repr(i) == exp
 
         idx = timedelta_range("1 hours", periods=10)
@@ -522,6 +522,6 @@ def test_categorical_index_repr_timedelta_ordered(self):
                   '3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
                   '6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
                   '9 days 01:00:00'],
-                 categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=True, dtype='category')"""  # noqa
+                 categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=True, dtype='category')"""
 
         assert repr(i) == exp
diff --git a/pandas/tests/arrays/sparse/test_array.py b/pandas/tests/arrays/sparse/test_array.py
index c02d8ae4e7429..81f7777fd52ec 100644
--- a/pandas/tests/arrays/sparse/test_array.py
+++ b/pandas/tests/arrays/sparse/test_array.py
@@ -659,13 +659,13 @@ def test_getslice_tuple(self):
         dense = np.array([np.nan, 0, 3, 4, 0, 5, np.nan, np.nan, 0])
 
         sparse = SparseArray(dense)
-        res = sparse[4:,]  # noqa: E231
-        exp = SparseArray(dense[4:,])  # noqa: E231
+        res = sparse[4:,]
+        exp = SparseArray(dense[4:,])
         tm.assert_sp_array_equal(res, exp)
 
         sparse = SparseArray(dense, fill_value=0)
-        res = sparse[4:,]  # noqa: E231
-        exp = SparseArray(dense[4:,], fill_value=0)  # noqa: E231
+        res = sparse[4:,]
+        exp = SparseArray(dense[4:,], fill_value=0)
         tm.assert_sp_array_equal(res, exp)
 
         with pytest.raises(IndexError):
diff --git a/pandas/tests/arrays/sparse/test_libsparse.py b/pandas/tests/arrays/sparse/test_libsparse.py
index a6836c58348b3..2cb0e192f4b21 100644
--- a/pandas/tests/arrays/sparse/test_libsparse.py
+++ b/pandas/tests/arrays/sparse/test_libsparse.py
@@ -446,10 +446,10 @@ def test_check_integrity(self):
 
         # 0-length OK
         # TODO: index variables are not used...is that right?
-        index = BlockIndex(0, locs, lengths)  # noqa
+        index = BlockIndex(0, locs, lengths)
 
         # also OK even though empty
-        index = BlockIndex(1, locs, lengths)  # noqa
+        index = BlockIndex(1, locs, lengths)
 
         msg = "Block 0 extends beyond end"
         with pytest.raises(ValueError, match=msg):
diff --git a/pandas/tests/computation/test_compat.py b/pandas/tests/computation/test_compat.py
index b3fbd8c17d8bf..375590e31c737 100644
--- a/pandas/tests/computation/test_compat.py
+++ b/pandas/tests/computation/test_compat.py
@@ -30,7 +30,7 @@ def test_compat():
 @pytest.mark.parametrize("parser", expr._parsers)
 def test_invalid_numexpr_version(engine, parser):
     def testit():
-        a, b = 1, 2  # noqa
+        a, b = 1, 2
         res = pd.eval("a + b", engine=engine, parser=parser)
         assert res == 3
 
diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py
index 4d40cd3a2d4ca..2c19e10bb7b6b 100644
--- a/pandas/tests/computation/test_eval.py
+++ b/pandas/tests/computation/test_eval.py
@@ -58,7 +58,7 @@
         )
         for engine in _engines
     )
-)  # noqa
+)
 def engine(request):
     return request.param
 
@@ -687,7 +687,7 @@ def test_disallow_scalar_bool_ops(self):
         exprs += ("2 * x > 2 or 1 and 2",)
         exprs += ("2 * df > 3 and 1 or a",)
 
-        x, a, b, df = np.random.randn(3), 1, 2, DataFrame(randn(3, 2))  # noqa
+        x, a, b, df = np.random.randn(3), 1, 2, DataFrame(randn(3, 2))
         for ex in exprs:
             with pytest.raises(NotImplementedError):
                 pd.eval(ex, engine=self.engine, parser=self.parser)
@@ -720,7 +720,7 @@ def test_identical(self):
         tm.assert_numpy_array_equal(result, np.array([1.5]))
         assert result.shape == (1,)
 
-        x = np.array([False])  # noqa
+        x = np.array([False])
         result = pd.eval("x", engine=self.engine, parser=self.parser)
         tm.assert_numpy_array_equal(result, np.array([False]))
         assert result.shape == (1,)
@@ -1204,7 +1204,7 @@ def test_single_variable(self):
     def test_truediv(self):
         s = np.array([1])
         ex = "s / 1"
-        d = {"s": s}  # noqa
+        d = {"s": s}
 
         res = self.eval(ex, truediv=False)
         tm.assert_numpy_array_equal(res, np.array([1.0]))
@@ -1229,7 +1229,7 @@ def test_truediv(self):
         assert res == expec
 
     def test_failing_subscript_with_name_error(self):
-        df = DataFrame(np.random.randn(5, 3))  # noqa
+        df = DataFrame(np.random.randn(5, 3))
         with pytest.raises(NameError):
             self.eval("df[x > 2] > 2")
 
@@ -1296,7 +1296,7 @@ def test_assignment_column(self):
         # with a local name overlap
         def f():
             df = orig_df.copy()
-            a = 1  # noqa
+            a = 1
             df.eval("a = 1 + b", inplace=True)
             return df
 
@@ -1308,7 +1308,7 @@ def f():
         df = orig_df.copy()
 
         def f():
-            a = 1  # noqa
+            a = 1
             old_a = df.a.copy()
             df.eval("a = a + b", inplace=True)
             result = old_a + df.b
@@ -1620,7 +1620,7 @@ def setup_class(cls):
         cls.arith_ops = filter(lambda x: x not in ("in", "not in"), cls.arith_ops)
 
     def test_check_many_exprs(self):
-        a = 1  # noqa
+        a = 1
         expr = " * ".join("a" * 33)
         expected = 1
         res = pd.eval(expr, engine=self.engine, parser=self.parser)
@@ -1660,13 +1660,13 @@ def test_fails_not(self):
             )
 
     def test_fails_ampersand(self):
-        df = DataFrame(np.random.randn(5, 3))  # noqa
+        df = DataFrame(np.random.randn(5, 3))
         ex = "(df + 2)[df > 1] > 0 & (df > 0)"
         with pytest.raises(NotImplementedError):
             pd.eval(ex, parser=self.parser, engine=self.engine)
 
     def test_fails_pipe(self):
-        df = DataFrame(np.random.randn(5, 3))  # noqa
+        df = DataFrame(np.random.randn(5, 3))
         ex = "(df + 2)[df > 1] > 0 | (df > 0)"
         with pytest.raises(NotImplementedError):
             pd.eval(ex, parser=self.parser, engine=self.engine)
@@ -1856,7 +1856,7 @@ def test_global_scope(self, engine, parser):
         )
 
     def test_no_new_locals(self, engine, parser):
-        x = 1  # noqa
+        x = 1
         lcls = locals().copy()
         pd.eval("x + 1", local_dict=lcls, engine=engine, parser=parser)
         lcls2 = locals().copy()
@@ -1864,7 +1864,7 @@ def test_no_new_locals(self, engine, parser):
         assert lcls == lcls2
 
     def test_no_new_globals(self, engine, parser):
-        x = 1  # noqa
+        x = 1
         gbls = globals().copy()
         pd.eval("x + 1", engine=engine, parser=parser)
         gbls2 = globals().copy()
@@ -1917,7 +1917,7 @@ def test_name_error_exprs(engine, parser):
 
 
 def test_invalid_local_variable_reference(engine, parser):
-    a, b = 1, 2  # noqa
+    a, b = 1, 2
     exprs = "a + @b", "@a + b", "@a + @b"
 
     for _expr in exprs:
@@ -1963,9 +1963,9 @@ def test_more_than_one_expression_raises(engine, parser):
 def test_bool_ops_fails_on_scalars(lhs, cmp, rhs, engine, parser):
     gen = {int: lambda: np.random.randint(10), float: np.random.randn}
 
-    mid = gen[lhs]()  # noqa
-    lhs = gen[lhs]()  # noqa
-    rhs = gen[rhs]()  # noqa
+    mid = gen[lhs]()
+    lhs = gen[lhs]()
+    rhs = gen[rhs]()
 
     ex1 = "lhs {0} mid {1} rhs".format(cmp, cmp)
     ex2 = "lhs {0} mid and mid {1} rhs".format(cmp, cmp)
diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py
index aeec12b9ad14e..1adbb9b136dd1 100644
--- a/pandas/tests/dtypes/test_inference.py
+++ b/pandas/tests/dtypes/test_inference.py
@@ -1112,7 +1112,7 @@ def test_to_object_array_tuples(self):
             record = namedtuple("record", "x y")
             r = record(5, 6)
             values = [r]
-            result = lib.to_object_array_tuples(values)  # noqa
+            result = lib.to_object_array_tuples(values)
         except ImportError:
             pass
 
@@ -1416,7 +1416,7 @@ def test_nan_to_nat_conversions():
 
 @td.skip_if_no_scipy
 @pytest.mark.filterwarnings("ignore::PendingDeprecationWarning")
-def test_is_scipy_sparse(spmatrix):  # noqa: F811
+def test_is_scipy_sparse(spmatrix):
     assert is_scipy_sparse(spmatrix([[0, 1]]))
     assert not is_scipy_sparse(np.array([1]))
 
diff --git a/pandas/tests/extension/base/__init__.py b/pandas/tests/extension/base/__init__.py
index 090df35bd94c9..7beb460857a3a 100644
--- a/pandas/tests/extension/base/__init__.py
+++ b/pandas/tests/extension/base/__init__.py
@@ -40,21 +40,21 @@ class TestMyDtype(BaseDtypeTests):
 ``assert_series_equal`` on your base test class.
 
 """
-from .casting import BaseCastingTests  # noqa
-from .constructors import BaseConstructorsTests  # noqa
-from .dtype import BaseDtypeTests  # noqa
-from .getitem import BaseGetitemTests  # noqa
-from .groupby import BaseGroupbyTests  # noqa
-from .interface import BaseInterfaceTests  # noqa
-from .io import BaseParsingTests  # noqa
-from .methods import BaseMethodsTests  # noqa
-from .missing import BaseMissingTests  # noqa
-from .ops import BaseArithmeticOpsTests, BaseComparisonOpsTests, BaseOpsUtil  # noqa
-from .printing import BasePrintingTests  # noqa
-from .reduce import (  # noqa
+from .casting import BaseCastingTests
+from .constructors import BaseConstructorsTests
+from .dtype import BaseDtypeTests
+from .getitem import BaseGetitemTests
+from .groupby import BaseGroupbyTests
+from .interface import BaseInterfaceTests
+from .io import BaseParsingTests
+from .methods import BaseMethodsTests
+from .missing import BaseMissingTests
+from .ops import BaseArithmeticOpsTests, BaseComparisonOpsTests, BaseOpsUtil
+from .printing import BasePrintingTests
+from .reduce import (
     BaseBooleanReduceTests,
     BaseNoReduceTests,
     BaseNumericReduceTests,
 )
-from .reshaping import BaseReshapingTests  # noqa
-from .setitem import BaseSetitemTests  # noqa
+from .reshaping import BaseReshapingTests
+from .setitem import BaseSetitemTests
diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py
index 017cbea7ec723..5abeddec28ad5 100644
--- a/pandas/tests/frame/test_alter_axes.py
+++ b/pandas/tests/frame/test_alter_axes.py
@@ -237,7 +237,7 @@ def test_set_index_pass_arrays_duplicate(
         # cannot drop the same column twice;
         # use "is" because == would give ambiguous Boolean error for containers
         first_drop = (
-            False if (keys[0] is "A" and keys[1] is "A") else drop  # noqa: F632
+            False if (keys[0] is "A" and keys[1] is "A") else drop
         )
         # to test against already-tested behaviour, we add sequentially,
         # hence second append always True; must wrap keys in list, otherwise
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index e99208ac78e15..424e83dd42b53 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -883,7 +883,7 @@ def test_stat_op_api(self, float_frame, float_string_frame):
         assert_stat_op_api("median", float_frame, float_string_frame)
 
         try:
-            from scipy.stats import skew, kurtosis  # noqa:F401
+            from scipy.stats import skew, kurtosis
 
             assert_stat_op_api("skew", float_frame, float_string_frame)
             assert_stat_op_api("kurt", float_frame, float_string_frame)
@@ -910,14 +910,14 @@ def sem(x):
             return np.std(x, ddof=1) / np.sqrt(len(x))
 
         def skewness(x):
-            from scipy.stats import skew  # noqa:F811
+            from scipy.stats import skew
 
             if len(x) < 3:
                 return np.nan
             return skew(x, bias=False)
 
         def kurt(x):
-            from scipy.stats import kurtosis  # noqa:F811
+            from scipy.stats import kurtosis
 
             if len(x) < 4:
                 return np.nan
@@ -962,7 +962,7 @@ def kurt(x):
         )
 
         try:
-            from scipy import skew, kurtosis  # noqa:F401
+            from scipy import skew, kurtosis
 
             assert_stat_op_calc("skew", skewness, float_frame_with_na)
             assert_stat_op_calc("kurt", kurt, float_frame_with_na)
@@ -1477,7 +1477,7 @@ def test_sum_bools(self):
     def test_cumsum_corner(self):
         dm = DataFrame(np.arange(20).reshape(4, 5), index=range(4), columns=range(5))
         # ?(wesm)
-        result = dm.cumsum()  # noqa
+        result = dm.cumsum()
 
     def test_cumsum(self, datetime_frame):
         datetime_frame.loc[5:10, 0] = np.nan
@@ -1496,7 +1496,7 @@ def test_cumsum(self, datetime_frame):
 
         # works
         df = DataFrame({"A": np.arange(20)}, index=np.arange(20))
-        result = df.cumsum()  # noqa
+        result = df.cumsum()
 
         # fix issue
         cumsum_xs = datetime_frame.cumsum(axis=1)
@@ -1548,7 +1548,7 @@ def test_cummin(self, datetime_frame):
 
         # it works
         df = DataFrame({"A": np.arange(20)}, index=np.arange(20))
-        result = df.cummin()  # noqa
+        result = df.cummin()
 
         # fix issue
         cummin_xs = datetime_frame.cummin(axis=1)
@@ -1571,7 +1571,7 @@ def test_cummax(self, datetime_frame):
 
         # it works
         df = DataFrame({"A": np.arange(20)}, index=np.arange(20))
-        result = df.cummax()  # noqa
+        result = df.cummax()
 
         # fix issue
         cummax_xs = datetime_frame.cummax(axis=1)
@@ -2140,7 +2140,7 @@ def test_round(self):
         nan_round_Series = Series({"col1": np.nan, "col2": 1})
 
         # TODO(wesm): unused?
-        expected_nan_round = DataFrame(  # noqa
+        expected_nan_round = DataFrame(
             {"col1": [1.123, 2.123, 3.123], "col2": [1.2, 2.2, 3.2]}
         )
 
@@ -2462,7 +2462,7 @@ def test_dot(self):
         A = DataFrame(a)
 
         # TODO(wesm): unused
-        B = DataFrame(b)  # noqa
+        B = DataFrame(b)
 
         # it works
         result = A.dot(b)
diff --git a/pandas/tests/frame/test_axis_select_reindex.py b/pandas/tests/frame/test_axis_select_reindex.py
index 1ef10ea5857d0..99fabac2e4583 100644
--- a/pandas/tests/frame/test_axis_select_reindex.py
+++ b/pandas/tests/frame/test_axis_select_reindex.py
@@ -596,7 +596,7 @@ def test_align_float(self, float_frame):
         diff_a_vals = af.reindex(diff_a).values
 
         # TODO(wesm): unused?
-        diff_b_vals = bf.reindex(diff_b).values  # noqa
+        diff_b_vals = bf.reindex(diff_b).values
 
         assert (diff_a_vals == -1).all()
 
diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py
index 1b6ee91317996..1cc782df298db 100644
--- a/pandas/tests/frame/test_block_internals.py
+++ b/pandas/tests/frame/test_block_internals.py
@@ -73,7 +73,7 @@ def test_consolidate(self, float_frame):
         assert len(float_frame._data.blocks) == 1
 
     def test_consolidate_inplace(self, float_frame):
-        frame = float_frame.copy()  # noqa
+        frame = float_frame.copy()
 
         # triggers in-place consolidation
         for letter in range(ord("A"), ord("Z")):
@@ -82,7 +82,7 @@ def test_consolidate_inplace(self, float_frame):
     def test_values_consolidate(self, float_frame):
         float_frame["E"] = 7.0
         assert not float_frame._data.is_consolidated()
-        _ = float_frame.values  # noqa
+        _ = float_frame.values
         assert float_frame._data.is_consolidated()
 
     def test_modify_values(self, float_frame):
@@ -409,8 +409,8 @@ def test_is_mixed_type(self, float_frame, float_string_frame):
 
     def test_get_numeric_data(self):
         # TODO(wesm): unused?
-        intname = np.dtype(np.int_).name  # noqa
-        floatname = np.dtype(np.float_).name  # noqa
+        intname = np.dtype(np.int_).name
+        floatname = np.dtype(np.float_).name
 
         datetime64name = np.dtype("M8[ns]").name
         objectname = np.dtype(np.object_).name
@@ -572,8 +572,8 @@ def test_stale_cached_series_bug_473(self):
             Y["e"] = Y["e"].astype("object")
             Y["g"]["c"] = np.NaN
             repr(Y)
-            result = Y.sum()  # noqa
-            exp = Y["g"].sum()  # noqa
+            result = Y.sum()
+            exp = Y["g"].sum()
             assert pd.isna(Y["g"]["c"])
 
     def test_get_X_columns(self):
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index aa00cf234d9ee..f22661918bc06 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -84,8 +84,8 @@ def test_constructor_mixed(self, float_string_frame):
         index, data = tm.getMixedTypeDict()
 
         # TODO(wesm), incomplete test?
-        indexed_frame = DataFrame(data, index=index)  # noqa
-        unindexed_frame = DataFrame(data)  # noqa
+        indexed_frame = DataFrame(data, index=index)
+        unindexed_frame = DataFrame(data)
 
         assert float_string_frame["foo"].dtype == np.object_
 
@@ -2035,7 +2035,7 @@ def test_from_records_to_records(self):
         arr[:] = [(1, 2.0, "Hello"), (2, 3.0, "World")]
 
         # TODO(wesm): unused
-        frame = DataFrame.from_records(arr)  # noqa
+        frame = DataFrame.from_records(arr)
 
         index = pd.Index(np.arange(len(arr))[::-1])
         indexed_frame = DataFrame.from_records(arr, index=index)
@@ -2132,7 +2132,7 @@ def test_from_records_columns_not_modified(self):
         columns = ["a", "b", "c"]
         original_columns = list(columns)
 
-        df = DataFrame.from_records(tuples, columns=columns, index="a")  # noqa
+        df = DataFrame.from_records(tuples, columns=columns, index="a")
 
         assert columns == original_columns
 
diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py
index 00be13b1c0e72..560e8acfc56b8 100644
--- a/pandas/tests/frame/test_dtypes.py
+++ b/pandas/tests/frame/test_dtypes.py
@@ -575,14 +575,14 @@ def test_astype_with_view_float(self, float_frame):
 
         # TODO(wesm): verification?
         tf = float_frame.astype(np.float64)
-        casted = tf.astype(np.int64, copy=False)  # noqa
+        casted = tf.astype(np.int64, copy=False)
 
     def test_astype_with_view_mixed_float(self, mixed_float_frame):
 
         tf = mixed_float_frame.reindex(columns=["A", "B", "C"])
 
         casted = tf.astype(np.int64)
-        casted = tf.astype(np.float32)  # noqa
+        casted = tf.astype(np.float32)
 
     @pytest.mark.parametrize("dtype", [np.int32, np.int64])
     @pytest.mark.parametrize("val", [np.nan, np.inf])
diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py
index 6d239e96cd167..1c340f018a8c1 100644
--- a/pandas/tests/frame/test_indexing.py
+++ b/pandas/tests/frame/test_indexing.py
@@ -918,7 +918,7 @@ def test_getitem_fancy_slice_integers_step(self):
         df = DataFrame(np.random.randn(10, 5))
 
         # this is OK
-        result = df.iloc[:8:2]  # noqa
+        result = df.iloc[:8:2]
         df.iloc[:8:2] = np.nan
         assert isna(df.iloc[:8:2]).values.all()
 
@@ -1475,7 +1475,7 @@ def test_setitem_fancy_scalar(self, float_frame):
 
         # individual value
         for j, col in enumerate(f.columns):
-            ts = f[col]  # noqa
+            ts = f[col]
             for idx in f.index[::5]:
                 i = f.index.get_loc(idx)
                 val = np.random.randn()
@@ -1621,7 +1621,7 @@ def test_getitem_setitem_float_labels(self):
             cp.iloc[1.0:5] = 0
 
         with pytest.raises(TypeError):
-            result = cp.iloc[1.0:5] == 0  # noqa
+            result = cp.iloc[1.0:5] == 0
 
         assert result.values.all()
         assert (cp.iloc[0:1] == df.iloc[0:1]).values.all()
diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py
index 94667ecfa837d..9448a12b458b0 100644
--- a/pandas/tests/frame/test_missing.py
+++ b/pandas/tests/frame/test_missing.py
@@ -15,7 +15,7 @@
 
 def _skip_if_no_pchip():
     try:
-        from scipy.interpolate import pchip_interpolate  # noqa
+        from scipy.interpolate import pchip_interpolate
     except ImportError:
         import pytest
 
@@ -710,7 +710,7 @@ def test_fill_corner(self, float_frame, float_string_frame):
         empty_float = float_frame.reindex(columns=[])
 
         # TODO(wesm): unused?
-        result = empty_float.fillna(value=0)  # noqa
+        result = empty_float.fillna(value=0)
 
     def test_fill_value_when_combine_const(self):
         # GH12723
diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py
index f5f6c9ad6b3da..8f87013d95813 100644
--- a/pandas/tests/frame/test_query_eval.py
+++ b/pandas/tests/frame/test_query_eval.py
@@ -102,7 +102,7 @@ def test_ops(self):
                 ("/", "__truediv__", "__rtruediv__"),
             ]:
 
-                base = DataFrame(  # noqa
+                base = DataFrame(
                     np.tile(m.values, n).reshape(n, -1), columns=list("abcd")
                 )
 
@@ -472,7 +472,7 @@ def test_query_scope(self):
 
         df = DataFrame(np.random.randn(20, 2), columns=list("ab"))
 
-        a, b = 1, 2  # noqa
+        a, b = 1, 2
         res = df.query("a > b", engine=engine, parser=parser)
         expected = df[df.a > df.b]
         assert_frame_equal(res, expected)
@@ -618,7 +618,7 @@ def test_chained_cmp_and_in(self):
         )
         ind = (
             (df.a < df.b) & (df.b < df.c) & ~df.b.isin(df.a) & ~df.c.isin(df.b)
-        )  # noqa
+        )
         expec = df[ind]
         assert_frame_equal(res, expec)
 
@@ -641,7 +641,7 @@ def test_local_variable_with_in(self):
     def test_at_inside_string(self):
         engine, parser = self.engine, self.parser
         skip_if_no_pandas_parser(parser)
-        c = 1  # noqa
+        c = 1
         df = DataFrame({"a": ["a", "a", "b", "b", "@c", "@c"]})
         result = df.query('a == "@c"', engine=engine, parser=parser)
         expected = df[df.a == "@c"]
@@ -660,7 +660,7 @@ def test_query_undefined_local(self):
             df.query("a == @c", engine=engine, parser=parser)
 
     def test_index_resolvers_come_after_columns_with_the_same_name(self):
-        n = 1  # noqa
+        n = 1
         a = np.r_[20:101:20]
 
         df = DataFrame({"index": a, "b": np.random.randn(a.size)})
@@ -777,7 +777,7 @@ def test_nested_scope(self):
         engine = self.engine
         parser = self.parser
         # smoke test
-        x = 1  # noqa
+        x = 1
         result = pd.eval("x + 1", engine=engine, parser=parser)
         assert result == 2
 
@@ -1019,7 +1019,7 @@ def test_query_string_scalar_variable(self, parser, engine):
             }
         )
         e = df[df.Symbol == "BUD US"]
-        symb = "BUD US"  # noqa
+        symb = "BUD US"
         r = df.query("Symbol == @symb", parser=parser, engine=engine)
         assert_frame_equal(e, r)
 
diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py
index 48f42b5f101ce..758cb1ecd7f59 100644
--- a/pandas/tests/frame/test_repr_info.py
+++ b/pandas/tests/frame/test_repr_info.py
@@ -30,17 +30,17 @@
 class TestDataFrameReprInfoEtc(TestData):
     def test_repr_empty(self):
         # empty
-        foo = repr(self.empty)  # noqa
+        foo = repr(self.empty)
 
         # empty with index
         frame = DataFrame(index=np.arange(1000))
-        foo = repr(frame)  # noqa
+        foo = repr(frame)
 
     def test_repr_mixed(self):
         buf = StringIO()
 
         # mixed
-        foo = repr(self.mixed_frame)  # noqa
+        foo = repr(self.mixed_frame)
         self.mixed_frame.info(verbose=False, buf=buf)
 
     @pytest.mark.slow
@@ -52,7 +52,7 @@ def test_repr_mixed_big(self):
         biggie.loc[:20, "A"] = np.nan
         biggie.loc[:20, "B"] = np.nan
 
-        foo = repr(biggie)  # noqa
+        foo = repr(biggie)
 
     def test_repr(self):
         buf = StringIO()
@@ -69,7 +69,7 @@ def test_repr(self):
 
         # columns but no index
         no_index = DataFrame(columns=[0, 1, 3])
-        foo = repr(no_index)  # noqa
+        foo = repr(no_index)
 
         # no columns or index
         self.empty.info(buf=buf)
@@ -131,7 +131,7 @@ def test_repr_unicode(self):
         uval = "\u03c3\u03c3\u03c3\u03c3"
 
         # TODO(wesm): is this supposed to be used?
-        bval = uval.encode("utf-8")  # noqa
+        bval = uval.encode("utf-8")
 
         df = DataFrame({"A": [uval, uval]})
 
diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py
index b8708e6ca1871..e213d25826e2a 100644
--- a/pandas/tests/frame/test_timeseries.py
+++ b/pandas/tests/frame/test_timeseries.py
@@ -281,7 +281,7 @@ def test_shift(self):
         )
 
         # shift int frame
-        int_shifted = self.intframe.shift(1)  # noqa
+        int_shifted = self.intframe.shift(1)
 
         # Shifting with PeriodIndex
         ps = tm.makePeriodFrame()
@@ -516,11 +516,11 @@ def test_asfreq(self):
 
         tm.assert_almost_equal(offset_monthly["A"], rule_monthly["A"])
 
-        filled = rule_monthly.asfreq("B", method="pad")  # noqa
+        filled = rule_monthly.asfreq("B", method="pad")
         # TODO: actually check that this worked.
 
         # don't forget!
-        filled_dep = rule_monthly.asfreq("B", method="pad")  # noqa
+        filled_dep = rule_monthly.asfreq("B", method="pad")
 
         # test does not blow up on length-0 DataFrame
         zero_length = self.tsframe.reindex([])
@@ -944,7 +944,7 @@ def test_tz_convert_and_localize(self, fn):
             df4 = DataFrame(np.ones(5), MultiIndex.from_arrays([int_idx, l0]))
 
             # TODO: untested
-            df5 = getattr(df4, fn)("US/Pacific", level=1)  # noqa
+            df5 = getattr(df4, fn)("US/Pacific", level=1)
 
             assert_index_equal(df3.index.levels[0], l0)
             assert not df3.index.levels[0].equals(l0_expected)
diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py
index aa80c461a00e7..5b453713a6055 100644
--- a/pandas/tests/groupby/aggregate/test_aggregate.py
+++ b/pandas/tests/groupby/aggregate/test_aggregate.py
@@ -34,7 +34,7 @@ def test_agg_must_agg(df):
 
 def test_agg_ser_multi_key(df):
     # TODO(wesm): unused
-    ser = df.C  # noqa
+    ser = df.C
 
     f = lambda x: x.sum()
     results = df.C.groupby([df.A, df.B]).aggregate(f)
diff --git a/pandas/tests/indexes/multi/test_format.py b/pandas/tests/indexes/multi/test_format.py
index a7f58b9ea78bd..394119d3efcf0 100644
--- a/pandas/tests/indexes/multi/test_format.py
+++ b/pandas/tests/indexes/multi/test_format.py
@@ -200,5 +200,5 @@ def test_tuple_width(self, wide_multi_index):
             ('abc', 10, '2000-01-01 00:33:17', '2000-01-01 00:33:17', ...),
             ('abc', 10, '2000-01-01 00:33:18', '2000-01-01 00:33:18', ...),
             ('abc', 10, '2000-01-01 00:33:19', '2000-01-01 00:33:19', ...)],
-           names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'], length=2000)"""  # noqa
+           names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'], length=2000)"""
         assert result == expected
diff --git a/pandas/tests/indexes/multi/test_sorting.py b/pandas/tests/indexes/multi/test_sorting.py
index 3dee1dbecf3ba..f495d167ef4bd 100644
--- a/pandas/tests/indexes/multi/test_sorting.py
+++ b/pandas/tests/indexes/multi/test_sorting.py
@@ -120,7 +120,7 @@ def test_unsortedindex():
 
 
 def test_unsortedindex_doc_examples():
-    # http://pandas.pydata.org/pandas-docs/stable/advanced.html#sorting-a-multiindex  # noqa
+    # http://pandas.pydata.org/pandas-docs/stable/advanced.html#sorting-a-multiindex
     dfm = DataFrame(
         {"jim": [0, 0, 1, 1], "joe": ["x", "x", "z", "y"], "jolie": np.random.rand(4)}
     )
diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py
index 8ed7f1a890c39..d69444b32b134 100644
--- a/pandas/tests/indexes/test_category.py
+++ b/pandas/tests/indexes/test_category.py
@@ -879,7 +879,7 @@ def test_frame_repr(self):
     def test_string_categorical_index_repr(self):
         # short
         idx = pd.CategoricalIndex(["a", "bb", "ccc"])
-        expected = """CategoricalIndex(['a', 'bb', 'ccc'], categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')"""  # noqa
+        expected = """CategoricalIndex(['a', 'bb', 'ccc'], categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')"""
         assert repr(idx) == expected
 
         # multiple lines
@@ -887,7 +887,7 @@ def test_string_categorical_index_repr(self):
         expected = """CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',
                   'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb',
                   'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
-                 categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')"""  # noqa
+                 categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')"""
 
         assert repr(idx) == expected
 
@@ -896,7 +896,7 @@ def test_string_categorical_index_repr(self):
         expected = """CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',
                   ...
                   'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
-                 categories=['a', 'bb', 'ccc'], ordered=False, dtype='category', length=300)"""  # noqa
+                 categories=['a', 'bb', 'ccc'], ordered=False, dtype='category', length=300)"""
 
         assert repr(idx) == expected
 
@@ -904,13 +904,13 @@ def test_string_categorical_index_repr(self):
         idx = pd.CategoricalIndex(list("abcdefghijklmmo"))
         expected = """CategoricalIndex(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l',
                   'm', 'm', 'o'],
-                 categories=['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', ...], ordered=False, dtype='category')"""  # noqa
+                 categories=['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', ...], ordered=False, dtype='category')"""
 
         assert repr(idx) == expected
 
         # short
         idx = pd.CategoricalIndex(["あ", "いい", "ううう"])
-        expected = """CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')"""  # noqa
+        expected = """CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')"""
         assert repr(idx) == expected
 
         # multiple lines
@@ -918,7 +918,7 @@ def test_string_categorical_index_repr(self):
         expected = """CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ',
                   'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',
                   'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'],
-                 categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')"""  # noqa
+                 categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')"""
 
         assert repr(idx) == expected
 
@@ -927,7 +927,7 @@ def test_string_categorical_index_repr(self):
         expected = """CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ',
                   ...
                   'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'],
-                 categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)"""  # noqa
+                 categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)"""
 
         assert repr(idx) == expected
 
@@ -935,7 +935,7 @@ def test_string_categorical_index_repr(self):
         idx = pd.CategoricalIndex(list("あいうえおかきくけこさしすせそ"))
         expected = """CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ', 'さ', 'し',
                   'す', 'せ', 'そ'],
-                 categories=['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', ...], ordered=False, dtype='category')"""  # noqa
+                 categories=['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', ...], ordered=False, dtype='category')"""
 
         assert repr(idx) == expected
 
@@ -944,7 +944,7 @@ def test_string_categorical_index_repr(self):
 
             # short
             idx = pd.CategoricalIndex(["あ", "いい", "ううう"])
-            expected = """CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')"""  # noqa
+            expected = """CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')"""
             assert repr(idx) == expected
 
             # multiple lines
@@ -953,7 +953,7 @@ def test_string_categorical_index_repr(self):
                   'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',
                   'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',
                   'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'],
-                 categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')"""  # noqa
+                 categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')"""
 
             assert repr(idx) == expected
 
@@ -964,7 +964,7 @@ def test_string_categorical_index_repr(self):
                   ...
                   'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',
                   'あ', 'いい', 'ううう'],
-                 categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)"""  # noqa
+                 categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)"""
 
             assert repr(idx) == expected
 
@@ -972,7 +972,7 @@ def test_string_categorical_index_repr(self):
             idx = pd.CategoricalIndex(list("あいうえおかきくけこさしすせそ"))
             expected = """CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ',
                   'さ', 'し', 'す', 'せ', 'そ'],
-                 categories=['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', ...], ordered=False, dtype='category')"""  # noqa
+                 categories=['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', ...], ordered=False, dtype='category')"""
 
             assert repr(idx) == expected
 
diff --git a/pandas/tests/indexing/test_callable.py b/pandas/tests/indexing/test_callable.py
index aa73bd728595f..78aaf80b532fb 100644
--- a/pandas/tests/indexing/test_callable.py
+++ b/pandas/tests/indexing/test_callable.py
@@ -17,11 +17,11 @@ def test_frame_loc_callable(self):
         res = df.loc[lambda x: x.A > 2]
         tm.assert_frame_equal(res, df.loc[df.A > 2])
 
-        res = df.loc[lambda x: x.A > 2,]  # noqa: E231
-        tm.assert_frame_equal(res, df.loc[df.A > 2,])  # noqa: E231
+        res = df.loc[lambda x: x.A > 2,]
+        tm.assert_frame_equal(res, df.loc[df.A > 2,])
 
-        res = df.loc[lambda x: x.A > 2,]  # noqa: E231
-        tm.assert_frame_equal(res, df.loc[df.A > 2,])  # noqa: E231
+        res = df.loc[lambda x: x.A > 2,]
+        tm.assert_frame_equal(res, df.loc[df.A > 2,])
 
         res = df.loc[lambda x: x.B == "b", :]
         tm.assert_frame_equal(res, df.loc[df.B == "b", :])
@@ -90,8 +90,8 @@ def test_frame_loc_callable_labels(self):
         res = df.loc[lambda x: ["A", "C"]]
         tm.assert_frame_equal(res, df.loc[["A", "C"]])
 
-        res = df.loc[lambda x: ["A", "C"],]  # noqa: E231
-        tm.assert_frame_equal(res, df.loc[["A", "C"],])  # noqa: E231
+        res = df.loc[lambda x: ["A", "C"],]
+        tm.assert_frame_equal(res, df.loc[["A", "C"],])
 
         res = df.loc[lambda x: ["A", "C"], :]
         tm.assert_frame_equal(res, df.loc[["A", "C"], :])
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index d478fbfa1686d..ed9e131ceaf0a 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -280,8 +280,8 @@ def test_dups_fancy_indexing(self):
         result.columns = list("aaaaaaa")
 
         # TODO(wesm): unused?
-        df_v = df.iloc[:, 4]  # noqa
-        res_v = result.iloc[:, 4]  # noqa
+        df_v = df.iloc[:, 4]
+        res_v = result.iloc[:, 4]
 
         tm.assert_frame_equal(df, result)
 
diff --git a/pandas/tests/io/formats/test_style.py b/pandas/tests/io/formats/test_style.py
index 0f1402d7da389..20424932af81e 100644
--- a/pandas/tests/io/formats/test_style.py
+++ b/pandas/tests/io/formats/test_style.py
@@ -12,7 +12,7 @@
 import pandas.util.testing as tm
 
 jinja2 = pytest.importorskip("jinja2")
-from pandas.io.formats.style import Styler, _get_level_lengths  # noqa  # isort:skip
+from pandas.io.formats.style import Styler, _get_level_lengths
 
 
 class TestStyler:
diff --git a/pandas/tests/io/json/test_normalize.py b/pandas/tests/io/json/test_normalize.py
index 3ceddfc3c1db4..2ed7771e72d60 100644
--- a/pandas/tests/io/json/test_normalize.py
+++ b/pandas/tests/io/json/test_normalize.py
@@ -687,7 +687,7 @@ def test_with_large_max_level(self):
                 "CreatedBy.user.family_tree.father.name": "Father001",
                 "CreatedBy.user.family_tree.father.father.Name": "Father002",
                 "CreatedBy.user.family_tree.father.father.father.name": "Father003",
-                "CreatedBy.user.family_tree.father.father.father.father.Name": "Father004",  # noqa: E501
+                "CreatedBy.user.family_tree.father.father.father.father.Name": "Father004",
             }
         ]
         output = nested_to_record(input_data, max_level=max_level)
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index 8e28740c70bad..82f2e7029f5a7 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -1006,7 +1006,7 @@ def test_round_trip_exception_(self):
         ],
     )
     def test_url(self, field, dtype):
-        url = "https://api.github.com/repos/pandas-dev/pandas/issues?per_page=5"  # noqa
+        url = "https://api.github.com/repos/pandas-dev/pandas/issues?per_page=5"
         result = read_json(url, convert_dates=True)
         assert result[field].dtype == dtype
 
diff --git a/pandas/tests/io/msgpack/test_obj.py b/pandas/tests/io/msgpack/test_obj.py
index 03d8807c0922c..e1b8c2add28ce 100644
--- a/pandas/tests/io/msgpack/test_obj.py
+++ b/pandas/tests/io/msgpack/test_obj.py
@@ -53,7 +53,7 @@ def test_bad_hook(self):
         msg = r"can't serialize \(1\+2j\)"
         with pytest.raises(TypeError, match=msg):
             packed = packb([3, 1 + 2j], default=lambda o: o)
-            unpacked = unpackb(packed, use_list=1)  # noqa
+            unpacked = unpackb(packed, use_list=1)
 
     def test_array_hook(self):
         packed = packb([1, 2, 3])
diff --git a/pandas/tests/io/parser/test_common.py b/pandas/tests/io/parser/test_common.py
index 3d657418e43cd..8a2fb1f39d6da 100644
--- a/pandas/tests/io/parser/test_common.py
+++ b/pandas/tests/io/parser/test_common.py
@@ -1138,7 +1138,7 @@ def test_escapechar(all_parsers):
     data = '''SEARCH_TERM,ACTUAL_URL
 "bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
 "tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
-"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''  # noqa
+"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''
 
     parser = all_parsers
     result = parser.read_csv(
@@ -1612,7 +1612,7 @@ def test_read_empty_with_usecols(all_parsers, data, kwargs, expected):
     ],
 )
 def test_trailing_spaces(all_parsers, kwargs, expected):
-    data = "A B C  \nrandom line with trailing spaces    \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n   \n5.1,NaN,10.0\n"  # noqa
+    data = "A B C  \nrandom line with trailing spaces    \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n   \n5.1,NaN,10.0\n"
     parser = all_parsers
 
     result = parser.read_csv(StringIO(data.replace(",", "  ")), **kwargs)
diff --git a/pandas/tests/io/parser/test_index_col.py b/pandas/tests/io/parser/test_index_col.py
index 4dfb8d3bd2dc8..ad1dbf00ba9f4 100644
--- a/pandas/tests/io/parser/test_index_col.py
+++ b/pandas/tests/io/parser/test_index_col.py
@@ -20,10 +20,10 @@ def test_index_col_named(all_parsers, with_header):
 KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
 KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
 KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
-KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""  # noqa
+KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
     header = (
         "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
-    )  # noqa
+    )
 
     if with_header:
         data = header + no_header
diff --git a/pandas/tests/io/parser/test_read_fwf.py b/pandas/tests/io/parser/test_read_fwf.py
index 9ddaccc4d38b7..87bb522521137 100644
--- a/pandas/tests/io/parser/test_read_fwf.py
+++ b/pandas/tests/io/parser/test_read_fwf.py
@@ -302,7 +302,7 @@ def test_fwf_regression():
 
 def test_fwf_for_uint8():
     data = """1421302965.213420    PRI=3 PGN=0xef00      DST=0x17 SRC=0x28    04 154 00 00 00 00 00 127
-1421302964.226776    PRI=6 PGN=0xf002               SRC=0x47    243 00 00 255 247 00 00 71"""  # noqa
+1421302964.226776    PRI=6 PGN=0xf002               SRC=0x47    243 00 00 255 247 00 00 71"""
     df = read_fwf(
         StringIO(data),
         colspecs=[(0, 17), (25, 26), (33, 37), (49, 51), (58, 62), (63, 1000)],
diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py
index 956438f1afdf4..bba7f15c1e11d 100644
--- a/pandas/tests/io/pytables/test_store.py
+++ b/pandas/tests/io/pytables/test_store.py
@@ -52,8 +52,8 @@
     read_hdf,
 )
 
-from pandas.io import pytables as pytables  # noqa: E402 isort:skip
-from pandas.io.pytables import TableIterator  # noqa: E402 isort:skip
+from pandas.io import pytables as pytables
+from pandas.io.pytables import TableIterator
 
 
 _default_compressor = "blosc"
@@ -410,7 +410,7 @@ def check(mode):
                 # context
                 if mode in ["r", "r+"]:
                     with pytest.raises(IOError):
-                        with HDFStore(path, mode=mode) as store:  # noqa
+                        with HDFStore(path, mode=mode) as store:
                             pass
                 else:
                     with HDFStore(path, mode=mode) as store:
@@ -2225,12 +2225,12 @@ def test_same_name_scoping(self, setup_path):
             store.put("df", df, format="table")
             expected = df[df.index > pd.Timestamp("20130105")]
 
-            import datetime  # noqa
+            import datetime
 
             result = store.select("df", "index>datetime.datetime(2013,1,5)")
             assert_frame_equal(result, expected)
 
-            from datetime import datetime  # noqa
+            from datetime import datetime
 
             # technically an error, but allow it
             result = store.select("df", "index>datetime.datetime(2013,1,5)")
@@ -2656,14 +2656,14 @@ def test_select_dtypes(self, setup_path):
             _maybe_remove(store, "df")
             store.append("df", df, data_columns=True)
 
-            expected = df[df.boolv == True].reindex(columns=["A", "boolv"])  # noqa
+            expected = df[df.boolv == True].reindex(columns=["A", "boolv"])
             for v in [True, "true", 1]:
                 result = store.select(
                     "df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
                 )
                 tm.assert_frame_equal(expected, result)
 
-            expected = df[df.boolv == False].reindex(columns=["A", "boolv"])  # noqa
+            expected = df[df.boolv == False].reindex(columns=["A", "boolv"])
             for v in [False, "false", 0]:
                 result = store.select(
                     "df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
@@ -2737,7 +2737,7 @@ def test_select_dtypes(self, setup_path):
             expected = df[df["A"] > 0]
 
             store.append("df", df, data_columns=True)
-            np_zero = np.float64(0)  # noqa
+            np_zero = np.float64(0)
             result = store.select("df", where=["A>np_zero"])
             tm.assert_frame_equal(expected, result)
 
@@ -3266,7 +3266,7 @@ def test_frame_select_complex2(self, setup_path):
             expected = read_hdf(hh, "df", where="l1=[2, 3, 4]")
 
             # scope with list like
-            l = selection.index.tolist()  # noqa
+            l = selection.index.tolist()
             store = HDFStore(hh)
             result = store.select("df", where="l1=l")
             assert_frame_equal(result, expected)
@@ -3276,7 +3276,7 @@ def test_frame_select_complex2(self, setup_path):
             assert_frame_equal(result, expected)
 
             # index
-            index = selection.index  # noqa
+            index = selection.index
             result = read_hdf(hh, "df", where="l1=index")
             assert_frame_equal(result, expected)
 
@@ -4465,7 +4465,7 @@ def test_columns_multiindex_modified(self, setup_path):
             )
             cols2load = list("BCD")
             cols2load_original = list(cols2load)
-            df_loaded = read_hdf(path, "df", columns=cols2load)  # noqa
+            df_loaded = read_hdf(path, "df", columns=cols2load)
             assert cols2load_original == cols2load
 
     @ignore_natural_naming_warning
@@ -4666,7 +4666,7 @@ def test_query_compare_column_type(self, setup_path):
         with ensure_clean_store(setup_path) as store:
             store.append("test", df, format="table", data_columns=True)
 
-            ts = pd.Timestamp("2014-01-01")  # noqa
+            ts = pd.Timestamp("2014-01-01")
             result = store.select("test", where="real_date > ts")
             expected = df.loc[[1], :]
             tm.assert_frame_equal(expected, result)
diff --git a/pandas/tests/io/test_feather.py b/pandas/tests/io/test_feather.py
index ea69245924b0c..3c01dc2a686b7 100644
--- a/pandas/tests/io/test_feather.py
+++ b/pandas/tests/io/test_feather.py
@@ -8,7 +8,7 @@
 import pandas.util.testing as tm
 from pandas.util.testing import assert_frame_equal, ensure_clean
 
-from pandas.io.feather_format import read_feather, to_feather  # noqa: E402 isort:skip
+from pandas.io.feather_format import read_feather, to_feather
 
 pyarrow = pytest.importorskip("pyarrow")
 
diff --git a/pandas/tests/io/test_packers.py b/pandas/tests/io/test_packers.py
index 0bafbab069dd4..e6cc66e3b8217 100644
--- a/pandas/tests/io/test_packers.py
+++ b/pandas/tests/io/test_packers.py
@@ -39,14 +39,14 @@
 nan = np.nan
 
 try:
-    import blosc  # NOQA
+    import blosc
 except ImportError:
     _BLOSC_INSTALLED = False
 else:
     _BLOSC_INSTALLED = True
 
 try:
-    import zlib  # NOQA
+    import zlib
 except ImportError:
     _ZLIB_INSTALLED = False
 else:
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index 2a95904d5668d..e811cee0473ec 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -21,14 +21,14 @@
 )
 
 try:
-    import pyarrow  # noqa
+    import pyarrow
 
     _HAVE_PYARROW = True
 except ImportError:
     _HAVE_PYARROW = False
 
 try:
-    import fastparquet  # noqa
+    import fastparquet
 
     _HAVE_FASTPARQUET = True
 except ImportError:
@@ -576,7 +576,7 @@ def test_partition_cols_supported(self, fp, df_full):
                 compression=None,
             )
             assert os.path.exists(path)
-            import fastparquet  # noqa: F811
+            import fastparquet
 
             actual_partition_cols = fastparquet.ParquetFile(path, False).cats
             assert len(actual_partition_cols) == 2
@@ -593,7 +593,7 @@ def test_partition_on_supported(self, fp, df_full):
                 partition_on=partition_cols,
             )
             assert os.path.exists(path)
-            import fastparquet  # noqa: F811
+            import fastparquet
 
             actual_partition_cols = fastparquet.ParquetFile(path, False).cats
             assert len(actual_partition_cols) == 2
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index 183a47c6039ec..b7c7e0b3f88c8 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -1071,7 +1071,7 @@ def test_database_uri_string(self):
         # in sqlalchemy.create_engine -> test passing of this error to user
         try:
             # the rest of this test depends on pg8000's being absent
-            import pg8000  # noqa
+            import pg8000
 
             pytest.skip("pg8000 is installed")
         except ImportError:
@@ -1733,7 +1733,7 @@ def test_notna_dtype(self):
 
         tbl = "notna_dtype_test"
         df.to_sql(tbl, self.conn)
-        returned_df = sql.read_sql_table(tbl, self.conn)  # noqa
+        returned_df = sql.read_sql_table(tbl, self.conn)
         meta = sqlalchemy.schema.MetaData(bind=self.conn)
         meta.reflect()
         if self.flavor == "mysql":
@@ -1935,7 +1935,7 @@ def test_read_procedure(self):
         connection = self.conn.connect()
         trans = connection.begin()
         try:
-            r1 = connection.execute(proc)  # noqa
+            r1 = connection.execute(proc)
             trans.commit()
         except pymysql.Error:
             trans.rollback()
diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py
index a0ec06a2197ae..04931851056a7 100644
--- a/pandas/tests/io/test_stata.py
+++ b/pandas/tests/io/test_stata.py
@@ -1115,7 +1115,7 @@ def test_read_chunks_117(
 
         pos = 0
         for j in range(5):
-            with warnings.catch_warnings(record=True) as w:  # noqa
+            with warnings.catch_warnings(record=True) as w:
                 warnings.simplefilter("always")
                 try:
                     chunk = itr.read(chunksize)
@@ -1201,7 +1201,7 @@ def test_read_chunks_115(
         )
         pos = 0
         for j in range(5):
-            with warnings.catch_warnings(record=True) as w:  # noqa
+            with warnings.catch_warnings(record=True) as w:
                 warnings.simplefilter("always")
                 try:
                     chunk = itr.read(chunksize)
diff --git a/pandas/tests/plotting/test_boxplot_method.py b/pandas/tests/plotting/test_boxplot_method.py
index 116d924f5a596..3a0cd9e5f2503 100644
--- a/pandas/tests/plotting/test_boxplot_method.py
+++ b/pandas/tests/plotting/test_boxplot_method.py
@@ -89,7 +89,7 @@ def test_boxplot_return_type_none(self):
     @pytest.mark.slow
     def test_boxplot_return_type_legacy(self):
         # API change in https://github.com/pandas-dev/pandas/pull/7096
-        import matplotlib as mpl  # noqa
+        import matplotlib as mpl
 
         df = DataFrame(
             np.random.randn(6, 4),
@@ -386,7 +386,7 @@ def test_grouped_box_layout(self):
         )
         self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(1, 4))
 
-        box = df.groupby("classroom").boxplot(  # noqa
+        box = df.groupby("classroom").boxplot(
             column=["height", "weight", "category"], layout=(1, -1), return_type="dict"
         )
         self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(1, 3))
diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py
index fd66888fc30e4..60c27a2338276 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/test_frame.py
@@ -3254,7 +3254,7 @@ def test_plot_no_numeric_data(self):
 def _generate_4_axes_via_gridspec():
     import matplotlib.pyplot as plt
     import matplotlib as mpl
-    import matplotlib.gridspec  # noqa
+    import matplotlib.gridspec
 
     gs = mpl.gridspec.GridSpec(2, 2)
     ax_tl = plt.subplot(gs[0, 0])
diff --git a/pandas/tests/plotting/test_hist_method.py b/pandas/tests/plotting/test_hist_method.py
index 14cb2bc9d7b62..4171cf83ca962 100644
--- a/pandas/tests/plotting/test_hist_method.py
+++ b/pandas/tests/plotting/test_hist_method.py
@@ -118,7 +118,7 @@ def test_hist_no_overlap(self):
     @pytest.mark.slow
     def test_hist_by_no_extra_plots(self):
         df = self.hist_df
-        axes = df.height.hist(by=df.gender)  # noqa
+        axes = df.height.hist(by=df.gender)
         assert len(self.plt.get_fignums()) == 1
 
     @pytest.mark.slow
diff --git a/pandas/tests/resample/test_base.py b/pandas/tests/resample/test_base.py
index 51e309130e45d..28836b878fc6e 100644
--- a/pandas/tests/resample/test_base.py
+++ b/pandas/tests/resample/test_base.py
@@ -21,7 +21,7 @@
 # a fixture value can be overridden by the test parameter value. Note that the
 # value of the fixture can be overridden this way even if the test doesn't use
 # it directly (doesn't mention it in the function prototype).
-# see https://docs.pytest.org/en/latest/fixture.html#override-a-fixture-with-direct-test-parametrization  # noqa
+# see https://docs.pytest.org/en/latest/fixture.html#override-a-fixture-with-direct-test-parametrization
 # in this module we override the fixture values defined in conftest.py
 # tuples of '_index_factory,_series_name,_index_start,_index_end'
 DATE_RANGE = (date_range, "dti", datetime(2005, 1, 1), datetime(2005, 1, 10))
diff --git a/pandas/tests/reshape/test_melt.py b/pandas/tests/reshape/test_melt.py
index b1d790644bbfb..dbc7db9f1c8c1 100644
--- a/pandas/tests/reshape/test_melt.py
+++ b/pandas/tests/reshape/test_melt.py
@@ -569,7 +569,7 @@ def test_stubs(self):
         stubs = ["inc", "edu"]
 
         # TODO: unused?
-        df_long = pd.wide_to_long(df, stubs, i="id", j="age")  # noqa
+        df_long = pd.wide_to_long(df, stubs, i="id", j="age")
 
         assert stubs == ["inc", "edu"]
 
diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py
index e4980be49d35f..e1bbd3153511d 100644
--- a/pandas/tests/scalar/timedelta/test_timedelta.py
+++ b/pandas/tests/scalar/timedelta/test_timedelta.py
@@ -781,8 +781,8 @@ def test_apply_to_timedelta(self):
         list_of_strings = ["00:00:01", np.nan, pd.NaT, timedelta_NaT]
 
         # TODO: unused?
-        a = pd.to_timedelta(list_of_strings)  # noqa
-        b = Series(list_of_strings).apply(pd.to_timedelta)  # noqa
+        a = pd.to_timedelta(list_of_strings)
+        b = Series(list_of_strings).apply(pd.to_timedelta)
         # Can't compare until apply on a Series gives the correct dtype
         # assert_series_equal(a, b)
 
diff --git a/pandas/tests/scalar/timestamp/test_rendering.py b/pandas/tests/scalar/timestamp/test_rendering.py
index 6b64b230a0bb9..d744678e1bebe 100644
--- a/pandas/tests/scalar/timestamp/test_rendering.py
+++ b/pandas/tests/scalar/timestamp/test_rendering.py
@@ -1,7 +1,7 @@
 import pprint
 
 import pytest
-import pytz  # noqa  # a test below uses pytz but only inside a `eval` call
+import pytz  # noqa: F401  # a test below uses pytz but only inside an `eval` call
 
 from pandas import Timestamp
 
diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py
index 652dd34ca7ce2..e6efb6ab952e4 100644
--- a/pandas/tests/scalar/timestamp/test_timestamp.py
+++ b/pandas/tests/scalar/timestamp/test_timestamp.py
@@ -330,7 +330,7 @@ def test_constructor_with_stringoffset(self):
         # converted to Chicago tz
         result = Timestamp("2013-11-01 00:00:00-0500", tz="America/Chicago")
         assert result.value == Timestamp("2013-11-01 05:00").value
-        expected = "Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')"  # noqa
+        expected = "Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')"
         assert repr(result) == expected
         assert result == eval(repr(result))
 
diff --git a/pandas/tests/series/indexing/test_datetime.py b/pandas/tests/series/indexing/test_datetime.py
index bf8d34cd62ff2..04619cf7e6e37 100644
--- a/pandas/tests/series/indexing/test_datetime.py
+++ b/pandas/tests/series/indexing/test_datetime.py
@@ -631,7 +631,7 @@ def test_indexing_unordered():
 
     for t in ts.index:
         # TODO: unused?
-        s = str(t)  # noqa
+        s = str(t)
 
         expected = ts[t]
         result = ts2[t]
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index 19d3e76f52adf..f6d10750eb5e9 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -761,7 +761,7 @@ def test_underlying_data_conversion():
     df["bb"].iloc[0] = 0.13
 
     # TODO: unused
-    df_tmp = df.iloc[ck]  # noqa
+    df_tmp = df.iloc[ck]
 
     df["bb"].iloc[0] = 0.15
     assert df["bb"].iloc[0] == 0.15
diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py
index 835514ea724ab..0e3f48b1e7fac 100644
--- a/pandas/tests/series/test_missing.py
+++ b/pandas/tests/series/test_missing.py
@@ -27,7 +27,7 @@
 
 def _skip_if_no_pchip():
     try:
-        from scipy.interpolate import pchip_interpolate  # noqa
+        from scipy.interpolate import pchip_interpolate
     except ImportError:
         import pytest
 
@@ -36,7 +36,7 @@ def _skip_if_no_pchip():
 
 def _skip_if_no_akima():
     try:
-        from scipy.interpolate import Akima1DInterpolator  # noqa
+        from scipy.interpolate import Akima1DInterpolator
     except ImportError:
         import pytest
 
diff --git a/pandas/tests/series/test_repr.py b/pandas/tests/series/test_repr.py
index 9f881f5a5aa29..34040a4bd93ae 100644
--- a/pandas/tests/series/test_repr.py
+++ b/pandas/tests/series/test_repr.py
@@ -326,7 +326,7 @@ def test_categorical_series_repr_datetime(self):
 4   2011-01-01 13:00:00
 dtype: category
 Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
-                                 2011-01-01 12:00:00, 2011-01-01 13:00:00]"""  # noqa
+                                 2011-01-01 12:00:00, 2011-01-01 13:00:00]"""
 
         assert repr(s) == exp
 
@@ -340,7 +340,7 @@ def test_categorical_series_repr_datetime(self):
 dtype: category
 Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
                                              2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
-                                             2011-01-01 13:00:00-05:00]"""  # noqa
+                                             2011-01-01 13:00:00-05:00]"""
 
         assert repr(s) == exp
 
@@ -354,7 +354,7 @@ def test_categorical_series_repr_datetime_ordered(self):
 4   2011-01-01 13:00:00
 dtype: category
 Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
-                                 2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""  # noqa
+                                 2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""
 
         assert repr(s) == exp
 
@@ -368,7 +368,7 @@ def test_categorical_series_repr_datetime_ordered(self):
 dtype: category
 Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
                                              2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
-                                             2011-01-01 13:00:00-05:00]"""  # noqa
+                                             2011-01-01 13:00:00-05:00]"""
 
         assert repr(s) == exp
 
@@ -382,7 +382,7 @@ def test_categorical_series_repr_period(self):
 4    2011-01-01 13:00
 dtype: category
 Categories (5, period[H]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
-                            2011-01-01 13:00]"""  # noqa
+                            2011-01-01 13:00]"""
 
         assert repr(s) == exp
 
@@ -408,7 +408,7 @@ def test_categorical_series_repr_period_ordered(self):
 4    2011-01-01 13:00
 dtype: category
 Categories (5, period[H]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
-                            2011-01-01 13:00]"""  # noqa
+                            2011-01-01 13:00]"""
 
         assert repr(s) == exp
 
@@ -452,7 +452,7 @@ def test_categorical_series_repr_timedelta(self):
 dtype: category
 Categories (10, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
                                    3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00,
-                                   8 days 01:00:00, 9 days 01:00:00]"""  # noqa
+                                   8 days 01:00:00, 9 days 01:00:00]"""
 
         assert repr(s) == exp
 
@@ -465,7 +465,7 @@ def test_categorical_series_repr_timedelta_ordered(self):
 3   4 days
 4   5 days
 dtype: category
-Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""  # noqa
+Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
 
         assert repr(s) == exp
 
@@ -484,6 +484,6 @@ def test_categorical_series_repr_timedelta_ordered(self):
 dtype: category
 Categories (10, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
                                    3 days 01:00:00 ... 6 days 01:00:00 < 7 days 01:00:00 <
-                                   8 days 01:00:00 < 9 days 01:00:00]"""  # noqa
+                                   8 days 01:00:00 < 9 days 01:00:00]"""
 
         assert repr(s) == exp
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index 48cfc06f42e91..232835dab862d 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -188,7 +188,7 @@ def test_factorize_nan(self):
         na_sentinel = -1
 
         # TODO(wesm): unused?
-        ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel)  # noqa
+        ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel)
 
         expected = np.array([2, -1, 0], dtype="int32")
         assert len(set(key)) == len(set(expected))
@@ -1585,7 +1585,7 @@ def test_hashtable_factorize(self, htable, tm_dtype, writable):
     def test_hashtable_large_sizehint(self, hashtable):
         # GH 22729
         size_hint = np.iinfo(np.uint32).max + 1
-        tbl = hashtable(size_hint=size_hint)  # noqa
+        tbl = hashtable(size_hint=size_hint)
 
 
 def test_quantile():
diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py
index 1f19f58e80f26..433e20177f04c 100644
--- a/pandas/tests/test_base.py
+++ b/pandas/tests/test_base.py
@@ -292,20 +292,20 @@ def test_none_comparison(self):
                 o[0] = np.nan
 
                 # noinspection PyComparisonWithNone
-                result = o == None  # noqa
+                result = o == None
                 assert not result.iat[0]
                 assert not result.iat[1]
 
                 # noinspection PyComparisonWithNone
-                result = o != None  # noqa
+                result = o != None
                 assert result.iat[0]
                 assert result.iat[1]
 
-                result = None == o  # noqa
+                result = None == o
                 assert not result.iat[0]
                 assert not result.iat[1]
 
-                result = None != o  # noqa
+                result = None != o
                 assert result.iat[0]
                 assert result.iat[1]
 
diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py
index 65b2dab1b02a8..618899e8f1ab1 100644
--- a/pandas/tests/test_common.py
+++ b/pandas/tests/test_common.py
@@ -17,13 +17,13 @@ def test_get_callable_name():
     def fn(x):
         return x
 
-    lambda_ = lambda x: x  # noqa: E731
+    lambda_ = lambda x: x
     part1 = partial(fn)
     part2 = partial(part1)
 
     class somecall:
         def __call__(self):
-            return x  # noqa
+            return x
 
     assert getname(fn) == "fn"
     assert getname(lambda_)
diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py
index b4d575682ffca..dfc9762ea742f 100644
--- a/pandas/tests/test_downstream.py
+++ b/pandas/tests/test_downstream.py
@@ -5,7 +5,7 @@
 import subprocess
 import sys
 
-import numpy as np  # noqa
+import numpy as np
 import pytest
 
 from pandas.compat import PY36
@@ -21,7 +21,7 @@ def import_module(name):
     if PY36:
         try:
             return importlib.import_module(name)
-        except ModuleNotFoundError:  # noqa
+        except ModuleNotFoundError:
             pytest.skip("skipping as {} not available".format(name))
 
     else:
@@ -40,8 +40,8 @@ def df():
 
 def test_dask(df):
 
-    toolz = import_module("toolz")  # noqa
-    dask = import_module("dask")  # noqa
+    toolz = import_module("toolz")
+    dask = import_module("dask")
 
     import dask.dataframe as dd
 
@@ -53,7 +53,7 @@ def test_dask(df):
 @pytest.mark.filterwarnings("ignore:Panel class is removed")
 def test_xarray(df):
 
-    xarray = import_module("xarray")  # noqa
+    xarray = import_module("xarray")
 
     assert df.to_xarray() is not None
 
@@ -68,7 +68,7 @@ def test_oo_optimizable():
 @pytest.mark.filterwarnings("ignore:can't:ImportWarning")
 def test_statsmodels():
 
-    statsmodels = import_module("statsmodels")  # noqa
+    statsmodels = import_module("statsmodels")
     import statsmodels.api as sm
     import statsmodels.formula.api as smf
 
@@ -80,7 +80,7 @@ def test_statsmodels():
 @pytest.mark.filterwarnings("ignore:can't:ImportWarning")
 def test_scikit_learn(df):
 
-    sklearn = import_module("sklearn")  # noqa
+    sklearn = import_module("sklearn")
     from sklearn import svm, datasets
 
     digits = datasets.load_digits()
@@ -101,14 +101,14 @@ def test_seaborn():
 
 def test_pandas_gbq(df):
 
-    pandas_gbq = import_module("pandas_gbq")  # noqa
+    pandas_gbq = import_module("pandas_gbq")
 
 
 @pytest.mark.xfail(reason="0.7.0 pending")
 @tm.network
 def test_pandas_datareader():
 
-    pandas_datareader = import_module("pandas_datareader")  # noqa
+    pandas_datareader = import_module("pandas_datareader")
     pandas_datareader.DataReader("F", "quandl", "2017-01-01", "2017-02-01")
 
 
@@ -119,7 +119,7 @@ def test_pandas_datareader():
 @pytest.mark.skip(reason="gh-25778: geopandas stack issue")
 def test_geopandas():
 
-    geopandas = import_module("geopandas")  # noqa
+    geopandas = import_module("geopandas")
     fp = geopandas.datasets.get_path("naturalearth_lowres")
     assert geopandas.read_file(fp) is not None
 
@@ -149,7 +149,7 @@ def _getitem_tuple(self, tup):
 @pytest.mark.filterwarnings("ignore:RangeIndex.* is deprecated:DeprecationWarning")
 def test_pyarrow(df):
 
-    pyarrow = import_module("pyarrow")  # noqa
+    pyarrow = import_module("pyarrow")
     table = pyarrow.Table.from_pandas(df)
     result = table.to_pandas()
     tm.assert_frame_equal(result, df)
diff --git a/pandas/tests/test_errors.py b/pandas/tests/test_errors.py
index 531c511e8c02d..82e8d2b4f535a 100644
--- a/pandas/tests/test_errors.py
+++ b/pandas/tests/test_errors.py
@@ -2,7 +2,7 @@
 
 from pandas.errors import AbstractMethodError
 
-import pandas as pd  # noqa
+import pandas as pd
 
 
 @pytest.mark.parametrize(
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index cfaf123045b1f..21964fa869c14 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -1884,7 +1884,7 @@ def test_ismethods(self):
         digit_e = [False, False, False, True, False, False, False, True, False, False]
 
         # TODO: unused
-        num_e = [  # noqa
+        num_e = [
             False,
             False,
             False,
diff --git a/pandas/tests/util/test_hashing.py b/pandas/tests/util/test_hashing.py
index df3c7fe9c9936..7451f8dc432a5 100644
--- a/pandas/tests/util/test_hashing.py
+++ b/pandas/tests/util/test_hashing.py
@@ -338,8 +338,8 @@ def test_hash_collisions():
     #
     # https://github.com/pandas-dev/pandas/issues/14711#issuecomment-264885726
     hashes = [
-        "Ingrid-9Z9fKIZmkO7i7Cn51Li34pJm44fgX6DYGBNj3VPlOH50m7HnBlPxfIwFMrcNJNMP6PSgLmwWnInciMWrCSAlLEvt7JkJl4IxiMrVbXSa8ZQoVaq5xoQPjltuJEfwdNlO6jo8qRRHvD8sBEBMQASrRa6TsdaPTPCBo3nwIBpE7YzzmyH0vMBhjQZLx1aCT7faSEx7PgFxQhHdKFWROcysamgy9iVj8DO2Fmwg1NNl93rIAqC3mdqfrCxrzfvIY8aJdzin2cHVzy3QUJxZgHvtUtOLxoqnUHsYbNTeq0xcLXpTZEZCxD4PGubIuCNf32c33M7HFsnjWSEjE2yVdWKhmSVodyF8hFYVmhYnMCztQnJrt3O8ZvVRXd5IKwlLexiSp4h888w7SzAIcKgc3g5XQJf6MlSMftDXm9lIsE1mJNiJEv6uY6pgvC3fUPhatlR5JPpVAHNSbSEE73MBzJrhCAbOLXQumyOXigZuPoME7QgJcBalliQol7YZ9",  # noqa: E501
-        "Tim-b9MddTxOWW2AT1Py6vtVbZwGAmYCjbp89p8mxsiFoVX4FyDOF3wFiAkyQTUgwg9sVqVYOZo09Dh1AzhFHbgij52ylF0SEwgzjzHH8TGY8Lypart4p4onnDoDvVMBa0kdthVGKl6K0BDVGzyOXPXKpmnMF1H6rJzqHJ0HywfwS4XYpVwlAkoeNsiicHkJUFdUAhG229INzvIAiJuAHeJDUoyO4DCBqtoZ5TDend6TK7Y914yHlfH3g1WZu5LksKv68VQHJriWFYusW5e6ZZ6dKaMjTwEGuRgdT66iU5nqWTHRH8WSzpXoCFwGcTOwyuqPSe0fTe21DVtJn1FKj9F9nEnR9xOvJUO7E0piCIF4Ad9yAIDY4DBimpsTfKXCu1vdHpKYerzbndfuFe5AhfMduLYZJi5iAw8qKSwR5h86ttXV0Mc0QmXz8dsRvDgxjXSmupPxBggdlqUlC828hXiTPD7am0yETBV0F3bEtvPiNJfremszcV8NcqAoARMe",  # noqa: E501
+        "Ingrid-9Z9fKIZmkO7i7Cn51Li34pJm44fgX6DYGBNj3VPlOH50m7HnBlPxfIwFMrcNJNMP6PSgLmwWnInciMWrCSAlLEvt7JkJl4IxiMrVbXSa8ZQoVaq5xoQPjltuJEfwdNlO6jo8qRRHvD8sBEBMQASrRa6TsdaPTPCBo3nwIBpE7YzzmyH0vMBhjQZLx1aCT7faSEx7PgFxQhHdKFWROcysamgy9iVj8DO2Fmwg1NNl93rIAqC3mdqfrCxrzfvIY8aJdzin2cHVzy3QUJxZgHvtUtOLxoqnUHsYbNTeq0xcLXpTZEZCxD4PGubIuCNf32c33M7HFsnjWSEjE2yVdWKhmSVodyF8hFYVmhYnMCztQnJrt3O8ZvVRXd5IKwlLexiSp4h888w7SzAIcKgc3g5XQJf6MlSMftDXm9lIsE1mJNiJEv6uY6pgvC3fUPhatlR5JPpVAHNSbSEE73MBzJrhCAbOLXQumyOXigZuPoME7QgJcBalliQol7YZ9",
+        "Tim-b9MddTxOWW2AT1Py6vtVbZwGAmYCjbp89p8mxsiFoVX4FyDOF3wFiAkyQTUgwg9sVqVYOZo09Dh1AzhFHbgij52ylF0SEwgzjzHH8TGY8Lypart4p4onnDoDvVMBa0kdthVGKl6K0BDVGzyOXPXKpmnMF1H6rJzqHJ0HywfwS4XYpVwlAkoeNsiicHkJUFdUAhG229INzvIAiJuAHeJDUoyO4DCBqtoZ5TDend6TK7Y914yHlfH3g1WZu5LksKv68VQHJriWFYusW5e6ZZ6dKaMjTwEGuRgdT66iU5nqWTHRH8WSzpXoCFwGcTOwyuqPSe0fTe21DVtJn1FKj9F9nEnR9xOvJUO7E0piCIF4Ad9yAIDY4DBimpsTfKXCu1vdHpKYerzbndfuFe5AhfMduLYZJi5iAw8qKSwR5h86ttXV0Mc0QmXz8dsRvDgxjXSmupPxBggdlqUlC828hXiTPD7am0yETBV0F3bEtvPiNJfremszcV8NcqAoARMe",
     ]
 
     # These should be different.
diff --git a/pandas/tseries/holiday.py b/pandas/tseries/holiday.py
index eb8600031439f..1d5be7ecf2c81 100644
--- a/pandas/tseries/holiday.py
+++ b/pandas/tseries/holiday.py
@@ -2,7 +2,7 @@
 from typing import List
 import warnings
 
-from dateutil.relativedelta import FR, MO, SA, SU, TH, TU, WE  # noqa
+from dateutil.relativedelta import FR, MO, SA, SU, TH, TU, WE
 import numpy as np
 
 from pandas.errors import PerformanceWarning
diff --git a/pandas/util/__init__.py b/pandas/util/__init__.py
index d906c0371d207..c2c7cdb8031c7 100644
--- a/pandas/util/__init__.py
+++ b/pandas/util/__init__.py
@@ -1,3 +1,3 @@
-from pandas.util._decorators import Appender, Substitution, cache_readonly  # noqa
+from pandas.util._decorators import Appender, Substitution, cache_readonly  # noqa: F401
 
-from pandas.core.util.hashing import hash_array, hash_pandas_object  # noqa
+from pandas.core.util.hashing import hash_array, hash_pandas_object  # noqa: F401
diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py
index f8c08ed8c099f..e5b11a82a68e7 100644
--- a/pandas/util/_decorators.py
+++ b/pandas/util/_decorators.py
@@ -15,7 +15,7 @@
 )
 import warnings
 
-from pandas._libs.properties import cache_readonly  # noqa
+from pandas._libs.properties import cache_readonly
 
 FuncType = Callable[..., Any]
 F = TypeVar("F", bound=FuncType)
diff --git a/pandas/util/_tester.py b/pandas/util/_tester.py
index 0f5324c8d02ba..4156cb449311e 100644
--- a/pandas/util/_tester.py
+++ b/pandas/util/_tester.py
@@ -13,7 +13,7 @@ def test(extra_args=None):
     except ImportError:
         raise ImportError("Need pytest>=4.0.2 to run tests")
     try:
-        import hypothesis  # noqa
+        import hypothesis
     except ImportError:
         raise ImportError("Need hypothesis>=3.58 to run tests")
     cmd = ["--skip-slow", "--skip-network", "--skip-db"]
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 73535e55d4fa5..03757570a7803 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -16,7 +16,7 @@
 import numpy as np
 from numpy.random import rand, randn
 
-from pandas._config.localization import (  # noqa:F401
+from pandas._config.localization import (
     can_set_locale,
     get_locales,
     set_locale,
diff --git a/scripts/tests/test_validate_docstrings.py b/scripts/tests/test_validate_docstrings.py
index 1506acc95edf9..59829eb810745 100644
--- a/scripts/tests/test_validate_docstrings.py
+++ b/scripts/tests/test_validate_docstrings.py
@@ -929,7 +929,7 @@ def test_bad_class(self, capsys):
     )
     def test_bad_generic_functions(self, capsys, func):
         errors = validate_one(
-            self._import_path(klass="BadGenericDocStrings", func=func)  # noqa:F821
+            self._import_path(klass="BadGenericDocStrings", func=func)
         )["errors"]
         assert isinstance(errors, list)
         assert errors
diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py
index 1d0f4b583bd0c..88ec5a513cbcb 100755
--- a/scripts/validate_docstrings.py
+++ b/scripts/validate_docstrings.py
@@ -41,20 +41,20 @@
 # script. Setting here before matplotlib is loaded.
 # We don't warn for the number of open plots, as none is actually being opened
 os.environ["MPLBACKEND"] = "Template"
-import matplotlib  # noqa: E402 isort:skip
+import matplotlib  # noqa: E402 isort:skip
 
 matplotlib.rc("figure", max_open_warning=10000)
 
-import numpy  # noqa: E402 isort:skip
+import numpy  # noqa: E402 isort:skip
 
 BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 
 sys.path.insert(0, os.path.join(BASE_PATH))
-import pandas  # noqa: E402 isort:skip
+import pandas  # noqa: E402 isort:skip
 
 sys.path.insert(1, os.path.join(BASE_PATH, "doc", "sphinxext"))
-from numpydoc.docscrape import NumpyDocString  # noqa: E402 isort:skip
-from pandas.io.formats.printing import pprint_thing  # noqa: E402 isort:skip
+from numpydoc.docscrape import NumpyDocString  # noqa: E402 isort:skip
+from pandas.io.formats.printing import pprint_thing  # noqa: E402 isort:skip
 
 
 PRIVATE_CLASSES = ["NDFrame", "IndexOpsMixin"]
@@ -611,8 +611,8 @@ def validate_pep8(self):
         # that do not user numpy or pandas
         content = "".join(
             (
-                "import numpy as np  # noqa: F401\n",
-                "import pandas as pd  # noqa: F401\n",
+                "import numpy as np \n",
+                "import pandas as pd \n",
                 *self.examples_source_code,
             )
         )
diff --git a/setup.py b/setup.py
index 0dd1980088db8..2d4748fe8dd5d 100755
--- a/setup.py
+++ b/setup.py
@@ -59,8 +59,8 @@ def is_platform_mac():
 # The import of Extension must be after the import of Cython, otherwise
 # we do not get the appropriately patched class.
 # See https://cython.readthedocs.io/en/latest/src/reference/compilation.html
-from distutils.extension import Extension  # noqa: E402 isort:skip
-from distutils.command.build import build  # noqa: E402 isort:skip
+from distutils.extension import Extension  # noqa: E402 isort:skip
+from distutils.command.build import build  # noqa: E402 isort:skip
 
 try:
     if not _CYTHON_INSTALLED:

From bb2618774ea16e6be36de96897a5b0be6009178b Mon Sep 17 00:00:00 2001
From: Kaathi <karthigeyan@honeybadgerlabs.in>
Date: Thu, 31 Oct 2019 06:21:38 +0530
Subject: [PATCH 3/4] NOQA fix in files to pass the linter and CI, issue #29207

---
 pandas/_typing.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/pandas/_typing.py b/pandas/_typing.py
index 2dc5a9e671d34..445eff9e19e47 100644
--- a/pandas/_typing.py
+++ b/pandas/_typing.py
@@ -17,12 +17,12 @@
 # and use a string literal forward reference to it in subsequent types
 # https://mypy.readthedocs.io/en/latest/common_issues.html#import-cycles
 if TYPE_CHECKING:
-    from pandas._libs import Period, Timedelta, Timestamp
-    from pandas.core.arrays.base import ExtensionArray
-    from pandas.core.dtypes.dtypes import ExtensionDtype
-    from pandas.core.indexes.base import Index
-    from pandas.core.series import Series
-    from pandas.core.generic import NDFrame
+    from pandas._libs import Period, Timedelta, Timestamp  # noqa: F401
+    from pandas.core.arrays.base import ExtensionArray  # noqa: F401
+    from pandas.core.dtypes.dtypes import ExtensionDtype  # noqa: F401
+    from pandas.core.indexes.base import Index  # noqa: F401
+    from pandas.core.series import Series  # noqa: F401
+    from pandas.core.generic import NDFrame  # noqa: F401
 
 
 AnyArrayLike = TypeVar("AnyArrayLike", "ExtensionArray", "Index", "Series", np.ndarray)

From 28b7404c6f414c20c042275040fa9fa8b38ae212 Mon Sep 17 00:00:00 2001
From: Kaathi <karthigeyan@honeybadgerlabs.in>
Date: Thu, 31 Oct 2019 06:32:22 +0530
Subject: [PATCH 4/4] Issue: #29207, Linter fix (NOQA)

---
 pandas/io/common.py | 2 +-
 pandas/io/sql.py    | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/pandas/io/common.py b/pandas/io/common.py
index 861de9184d7aa..1a95fe21454c2 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -22,7 +22,7 @@
     Type,
     Union,
 )
-from urllib.parse import (
+from urllib.parse import (  # noqa: F401
     urlencode,
     urljoin,
     urlparse as parse_url,
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 8af895ce1fce4..822b3288c82d9 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -48,7 +48,7 @@ def _is_sqlalchemy_connectable(con):
             _SQLALCHEMY_INSTALLED = False
 
     if _SQLALCHEMY_INSTALLED:
-        import sqlalchemy
+        import sqlalchemy  # noqa: F811
 
         return isinstance(con, sqlalchemy.engine.Connectable)
     else: