From b2b89a491bb607e958c432c17a5677b3abb6a952 Mon Sep 17 00:00:00 2001
From: Ned Batchelder
Date: Fri, 23 Feb 2024 16:00:10 -0500
Subject: [PATCH 01/24] build: bump version
---
CHANGES.rst | 6 ++++++
coverage/version.py | 4 ++--
2 files changed, 8 insertions(+), 2 deletions(-)
diff --git a/CHANGES.rst b/CHANGES.rst
index 6deb2074d..3c3694456 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -20,6 +20,12 @@ upgrading your version of coverage.py.
.. Version 9.8.1 — 2027-07-27
.. --------------------------
+Unreleased
+----------
+
+Nothing yet.
+
+
.. scriv-start-here
.. _changes_7-4-3:
diff --git a/coverage/version.py b/coverage/version.py
index 73db1316a..0d7f6093e 100644
--- a/coverage/version.py
+++ b/coverage/version.py
@@ -8,8 +8,8 @@
# version_info: same semantics as sys.version_info.
# _dev: the .devN suffix if any.
-version_info = (7, 4, 3, "final", 0)
-_dev = 0
+version_info = (7, 4, 4, "alpha", 0)
+_dev = 1
def _make_version(
From 548eacb8cb0066198fe83ae5a0933d6e8a808db7 Mon Sep 17 00:00:00 2001
From: Ned Batchelder
Date: Tue, 27 Feb 2024 15:49:11 -0500
Subject: [PATCH 02/24] build: dipping a toe into ruff
---
pyproject.toml | 14 ++++++++++++++
1 file changed, 14 insertions(+)
diff --git a/pyproject.toml b/pyproject.toml
index cd523c404..1752aa66c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -138,6 +138,20 @@ balanced_clumps = [
"GetZipBytesTest",
]
+## RUFF
+# We aren't using ruff for real yet...
+
+[tool.ruff]
+target-version = "py38" # Can't use [project]
+line-length = 100
+
+[tool.ruff.lint]
+select = ["ALL"]
+ignore = [
+ "ANN101", # Missing type annotation for `self` in method
+ "ERA001", # Found commented-out code
+]
+
## SCRIV
[tool.scriv]
From e4e238a9ed8f2ad2b9060247591b4c057c2953bf Mon Sep 17 00:00:00 2001
From: Ned Batchelder
Date: Tue, 27 Feb 2024 15:19:47 -0500
Subject: [PATCH 03/24] style: fix COM812 Trailing comma missing
---
ci/download_gha_artifacts.py | 2 +-
coverage/cmdline.py | 14 +-
coverage/collector.py | 2 +-
coverage/config.py | 6 +-
coverage/control.py | 14 +-
coverage/debug.py | 2 +-
coverage/execfile.py | 2 +-
coverage/files.py | 6 +-
coverage/html.py | 2 +-
coverage/inorout.py | 12 +-
coverage/jsonreport.py | 6 +-
coverage/lcovreport.py | 4 +-
coverage/misc.py | 2 +-
coverage/parser.py | 24 +--
coverage/phystokens.py | 2 +-
coverage/plugin_support.py | 2 +-
coverage/pytracer.py | 4 +-
coverage/report.py | 4 +-
coverage/sqldata.py | 48 +++---
coverage/sqlitedb.py | 2 +-
coverage/sysmon.py | 8 +-
coverage/templite.py | 8 +-
coverage/tomlconfig.py | 4 +-
coverage/types.py | 2 +-
coverage/xmlreport.py | 4 +-
igor.py | 20 +--
setup.py | 4 +-
tests/coveragetest.py | 2 +-
tests/modules/process_test/try_execfile.py | 2 +-
tests/plugin1.py | 2 +-
tests/test_api.py | 4 +-
tests/test_arcs.py | 50 +++---
tests/test_cmdline.py | 2 +-
tests/test_concurrency.py | 4 +-
tests/test_config.py | 2 +-
tests/test_context.py | 4 +-
tests/test_coverage.py | 190 ++++++++++-----------
tests/test_data.py | 4 +-
tests/test_files.py | 8 +-
tests/test_json.py | 12 +-
tests/test_numbits.py | 10 +-
tests/test_oddball.py | 8 +-
tests/test_parser.py | 6 +-
tests/test_plugins.py | 2 +-
tests/test_process.py | 10 +-
tests/test_report_common.py | 10 +-
tests/test_results.py | 6 +-
tests/test_setup.py | 2 +-
tests/test_templite.py | 46 ++---
tests/test_testing.py | 12 +-
tests/test_venv.py | 6 +-
tests/test_xml.py | 2 +-
52 files changed, 308 insertions(+), 308 deletions(-)
diff --git a/ci/download_gha_artifacts.py b/ci/download_gha_artifacts.py
index d91add0c3..e656b6d2e 100644
--- a/ci/download_gha_artifacts.py
+++ b/ci/download_gha_artifacts.py
@@ -99,7 +99,7 @@ def main(owner_repo, artifact_pattern, dest_dir):
print(
f"Downloading {artifact['name']}, "
+ f"size: {artifact['size_in_bytes']}, "
- + f"created: {utc2local(artifact['created_at'])}"
+ + f"created: {utc2local(artifact['created_at'])}",
)
download_url(artifact["archive_download_url"], temp_zip)
unpack_zipfile(temp_zip)
diff --git a/coverage/cmdline.py b/coverage/cmdline.py
index 5379c7c5f..25012019a 100644
--- a/coverage/cmdline.py
+++ b/coverage/cmdline.py
@@ -419,7 +419,7 @@ def get_prog_name(self) -> str:
"erase": CmdOptionParser(
"erase",
[
- Opts.datafile
+ Opts.datafile,
] + GLOBAL_ARGS,
description="Erase previously collected coverage data.",
),
@@ -712,7 +712,7 @@ def command_line(self, argv: List[str]) -> int:
skip_empty=options.skip_empty,
sort=options.sort,
output_format=options.format,
- **report_args
+ **report_args,
)
elif options.action == "annotate":
self.coverage.annotate(directory=options.directory, **report_args)
@@ -724,25 +724,25 @@ def command_line(self, argv: List[str]) -> int:
skip_empty=options.skip_empty,
show_contexts=options.show_contexts,
title=options.title,
- **report_args
+ **report_args,
)
elif options.action == "xml":
total = self.coverage.xml_report(
outfile=options.outfile,
skip_empty=options.skip_empty,
- **report_args
+ **report_args,
)
elif options.action == "json":
total = self.coverage.json_report(
outfile=options.outfile,
pretty_print=options.pretty_print,
show_contexts=options.show_contexts,
- **report_args
+ **report_args,
)
elif options.action == "lcov":
total = self.coverage.lcov_report(
outfile=options.outfile,
- **report_args
+ **report_args,
)
else:
# There are no other possible actions.
@@ -839,7 +839,7 @@ def do_run(self, options: optparse.Values, args: List[str]) -> int:
show_help(
"Options affecting multiprocessing must only be specified " +
"in a configuration file.\n" +
- f"Remove --{opt_name} from the command line."
+ f"Remove --{opt_name} from the command line.",
)
return ERR
diff --git a/coverage/collector.py b/coverage/collector.py
index dcb8a30dd..9a7d5c02d 100644
--- a/coverage/collector.py
+++ b/coverage/collector.py
@@ -225,7 +225,7 @@ def __init__(
raise ConfigError(
"Can't support concurrency={} with {}, only threads are supported.".format(
tried, self.tracer_name(),
- )
+ ),
)
if do_threading or not concurrencies:
diff --git a/coverage/config.py b/coverage/config.py
index 24d5642b2..9eaf9dbd4 100644
--- a/coverage/config.py
+++ b/coverage/config.py
@@ -138,7 +138,7 @@ def getregexlist(self, section: str, option: str) -> List[str]:
re.compile(value)
except re.error as e:
raise ConfigError(
- f"Invalid [{section}].{option} value {value!r}: {e}"
+ f"Invalid [{section}].{option} value {value!r}: {e}",
) from e
if value:
value_list.append(value)
@@ -323,8 +323,8 @@ def from_file(self, filename: str, warn: Callable[[str], None], our_file: bool)
for unknown in set(cp.options(section)) - options:
warn(
"Unrecognized option '[{}] {}=' in config file {}".format(
- real_section, unknown, filename
- )
+ real_section, unknown, filename,
+ ),
)
# [paths] is special
diff --git a/coverage/control.py b/coverage/control.py
index d33ef769a..e5da10227 100644
--- a/coverage/control.py
+++ b/coverage/control.py
@@ -30,7 +30,7 @@
from coverage.context import should_start_context_test_function, combine_context_switchers
from coverage.data import CoverageData, combine_parallel_data
from coverage.debug import (
- DebugControl, NoDebugging, short_stack, write_formatted_info, relevant_environment_display
+ DebugControl, NoDebugging, short_stack, write_formatted_info, relevant_environment_display,
)
from coverage.disposition import disposition_debug_msg
from coverage.exceptions import ConfigError, CoverageException, CoverageWarning, PluginError
@@ -570,7 +570,7 @@ def _init_for_start(self) -> None:
for plugin in self._plugins.file_tracers
),
self._collector.tracer_name(),
- )
+ ),
)
for plugin in self._plugins.file_tracers:
plugin._coverage_enabled = False
@@ -800,7 +800,7 @@ def combine(
self,
data_paths: Optional[Iterable[str]] = None,
strict: bool = False,
- keep: bool = False
+ keep: bool = False,
) -> None:
"""Combine together a number of similarly-named coverage data files.
@@ -965,8 +965,8 @@ def _get_file_reporter(self, morf: TMorf) -> FileReporter:
if file_reporter is None:
raise PluginError(
"Plugin {!r} did not provide a file reporter for {!r}.".format(
- plugin._coverage_plugin_name, morf
- )
+ plugin._coverage_plugin_name, morf,
+ ),
)
if file_reporter == "python":
@@ -1312,7 +1312,7 @@ def plugin_info(plugins: List[Any]) -> List[str]:
("configs_read", self.config.config_files_read),
("config_file", self.config.config_file),
("config_contents",
- repr(self.config._config_contents) if self.config._config_contents else "-none-"
+ repr(self.config._config_contents) if self.config._config_contents else "-none-",
),
("data_file", self._data.data_filename() if self._data is not None else "-none-"),
("python", sys.version.replace("\n", "")),
@@ -1343,7 +1343,7 @@ def plugin_info(plugins: List[Any]) -> List[str]:
Coverage = decorate_methods( # type: ignore[misc]
show_calls(show_args=True),
- butnot=["get_data"]
+ butnot=["get_data"],
)(Coverage)
diff --git a/coverage/debug.py b/coverage/debug.py
index 8aaecb589..0895c570f 100644
--- a/coverage/debug.py
+++ b/coverage/debug.py
@@ -247,7 +247,7 @@ def short_stack(
for pat in BORING_PRELUDE:
stack = itertools.dropwhile(
(lambda fi, pat=pat: re.search(pat, fi.filename)), # type: ignore[misc]
- stack
+ stack,
)
lines = []
for frame_info in stack:
diff --git a/coverage/execfile.py b/coverage/execfile.py
index aac4d30bb..966fc680c 100644
--- a/coverage/execfile.py
+++ b/coverage/execfile.py
@@ -59,7 +59,7 @@ def find_module(
if not spec:
raise NoSource(
f"No module named {mod_main}; " +
- f"{modulename!r} is a package and cannot be directly executed"
+ f"{modulename!r} is a package and cannot be directly executed",
)
pathname = spec.origin
packagename = spec.name
diff --git a/coverage/files.py b/coverage/files.py
index 0b276d16a..9ae17a58a 100644
--- a/coverage/files.py
+++ b/coverage/files.py
@@ -478,12 +478,12 @@ def map(self, path: str, exists:Callable[[str], bool] = source_exists) -> str:
if not exists(new):
self.debugfn(
f"Rule {original_pattern!r} changed {path!r} to {new!r} " +
- "which doesn't exist, continuing"
+ "which doesn't exist, continuing",
)
continue
self.debugfn(
f"Matched path {path!r} to rule {original_pattern!r} -> {result!r}, " +
- f"producing {new!r}"
+ f"producing {new!r}",
)
return new
@@ -500,7 +500,7 @@ def map(self, path: str, exists:Callable[[str], bool] = source_exists) -> str:
# Only add a new pattern if we don't already have this pattern.
if not any(p == pattern for p, _, _ in self.aliases):
self.debugfn(
- f"Generating rule: {pattern!r} -> {result!r} using regex {regex_pat!r}"
+ f"Generating rule: {pattern!r} -> {result!r} using regex {regex_pat!r}",
)
self.aliases.append((pattern, re.compile(regex_pat), result))
return self.map(path, exists=exists)
diff --git a/coverage/html.py b/coverage/html.py
index 5a571dac0..1db62b3e0 100644
--- a/coverage/html.py
+++ b/coverage/html.py
@@ -447,7 +447,7 @@ def write_html_file(self, ftr: FileToReport, prev_html: str, next_html: str) ->
css_classes = []
if ldata.category:
css_classes.append(
- self.template_globals["category"][ldata.category] # type: ignore[index]
+ self.template_globals["category"][ldata.category], # type: ignore[index]
)
ldata.css_class = " ".join(css_classes) or "pln"
diff --git a/coverage/inorout.py b/coverage/inorout.py
index 2e3746249..4359570c2 100644
--- a/coverage/inorout.py
+++ b/coverage/inorout.py
@@ -270,14 +270,14 @@ def _debug(msg: str) -> None:
if modfile:
if self.third_match.match(modfile):
_debug(
- f"Source in third-party: source_pkg {pkg!r} at {modfile!r}"
+ f"Source in third-party: source_pkg {pkg!r} at {modfile!r}",
)
self.source_in_third_paths.add(canonical_path(source_for_file(modfile)))
else:
for pathdir in path:
if self.third_match.match(pathdir):
_debug(
- f"Source in third-party: {pkg!r} path directory at {pathdir!r}"
+ f"Source in third-party: {pkg!r} path directory at {pathdir!r}",
)
self.source_in_third_paths.add(pathdir)
@@ -363,7 +363,7 @@ def nope(disp: TFileDisposition, reason: str) -> TFileDisposition:
disp.has_dynamic_filename = True
else:
disp.source_filename = canonical_filename(
- file_tracer.source_filename()
+ file_tracer.source_filename(),
)
break
except Exception:
@@ -380,7 +380,7 @@ def nope(disp: TFileDisposition, reason: str) -> TFileDisposition:
if not disp.has_dynamic_filename:
if not disp.source_filename:
raise PluginError(
- f"Plugin {plugin!r} didn't set source_filename for '{disp.original_filename}'"
+ f"Plugin {plugin!r} didn't set source_filename for '{disp.original_filename}'",
)
reason = self.check_include_omit_etc(disp.source_filename, frame)
if reason:
@@ -483,8 +483,8 @@ def warn_already_imported_files(self) -> None:
elif self.debug and self.debug.should("trace"):
self.debug.write(
"Didn't trace already imported file {!r}: {}".format(
- disp.original_filename, disp.reason
- )
+ disp.original_filename, disp.reason,
+ ),
)
def warn_unimported_source(self) -> None:
diff --git a/coverage/jsonreport.py b/coverage/jsonreport.py
index 0820c816e..2a0b9c647 100644
--- a/coverage/jsonreport.py
+++ b/coverage/jsonreport.py
@@ -59,7 +59,7 @@ def report(self, morfs: Optional[Iterable[TMorf]], outfile: IO[str]) -> float:
for file_reporter, analysis in get_analysis_to_report(self.coverage, morfs):
measured_files[file_reporter.relative_filename()] = self.report_one_file(
coverage_data,
- analysis
+ analysis,
)
self.report_data["files"] = measured_files
@@ -117,10 +117,10 @@ def report_one_file(self, coverage_data: CoverageData, analysis: Analysis) -> Di
"missing_branches": nums.n_missing_branches,
})
reported_file["executed_branches"] = list(
- _convert_branch_arcs(analysis.executed_branch_arcs())
+ _convert_branch_arcs(analysis.executed_branch_arcs()),
)
reported_file["missing_branches"] = list(
- _convert_branch_arcs(analysis.missing_branch_arcs())
+ _convert_branch_arcs(analysis.missing_branch_arcs()),
)
return reported_file
diff --git a/coverage/lcovreport.py b/coverage/lcovreport.py
index f0e084767..33eb39871 100644
--- a/coverage/lcovreport.py
+++ b/coverage/lcovreport.py
@@ -97,10 +97,10 @@ def get_lcov(self, fr: FileReporter, analysis: Analysis, outfile: IO[str]) -> No
missing_arcs = analysis.missing_branch_arcs()
executed_arcs = analysis.executed_branch_arcs()
for block_number, block_line_number in enumerate(
- sorted(analysis.branch_stats().keys())
+ sorted(analysis.branch_stats().keys()),
):
for branch_number, line_number in enumerate(
- sorted(missing_arcs[block_line_number])
+ sorted(missing_arcs[block_line_number]),
):
# The exit branches have a negative line number,
# this will not produce valid lcov. Setting
diff --git a/coverage/misc.py b/coverage/misc.py
index 0280650d7..6175d397e 100644
--- a/coverage/misc.py
+++ b/coverage/misc.py
@@ -243,7 +243,7 @@ def _needs_to_implement(that: Any, func_name: str) -> NoReturn:
name = f"{klass.__module__}.{klass.__name__}"
raise NotImplementedError(
- f"{thing} {name!r} needs to implement {func_name}()"
+ f"{thing} {name!r} needs to implement {func_name}()",
)
diff --git a/coverage/parser.py b/coverage/parser.py
index 9349c9ea8..3304ecab8 100644
--- a/coverage/parser.py
+++ b/coverage/parser.py
@@ -153,7 +153,7 @@ def _raw_parse(self) -> None:
if self.show_tokens: # pragma: debugging
print("%10s %5s %-20r %r" % (
tokenize.tok_name.get(toktype, toktype),
- nice_pair((slineno, elineno)), ttext, ltext
+ nice_pair((slineno, elineno)), ttext, ltext,
))
if toktype == token.INDENT:
indent += 1
@@ -272,7 +272,7 @@ def parse_source(self) -> None:
lineno = err.args[1][0] # TokenError
raise NotPython(
f"Couldn't parse '{self.filename}' as Python source: " +
- f"{err.args[0]!r} at line {lineno}"
+ f"{err.args[0]!r} at line {lineno}",
) from err
self.excluded = self.first_lines(self.raw_excluded)
@@ -403,8 +403,8 @@ def __init__(
except SyntaxError as synerr:
raise NotPython(
"Couldn't parse '%s' as Python source: '%s' at line %d" % (
- filename, synerr.msg, synerr.lineno or 0
- )
+ filename, synerr.msg, synerr.lineno or 0,
+ ),
) from synerr
def child_parsers(self) -> Iterable[ByteParser]:
@@ -756,7 +756,7 @@ def line_for_node(self, node: ast.AST) -> TLineNo:
node_name = node.__class__.__name__
handler = cast(
Optional[Callable[[ast.AST], TLineNo]],
- getattr(self, "_line__" + node_name, None)
+ getattr(self, "_line__" + node_name, None),
)
if handler is not None:
return handler(node)
@@ -830,7 +830,7 @@ def add_arcs(self, node: ast.AST) -> Set[ArcStart]:
node_name = node.__class__.__name__
handler = cast(
Optional[Callable[[ast.AST], Set[ArcStart]]],
- getattr(self, "_handle__" + node_name, None)
+ getattr(self, "_handle__" + node_name, None),
)
if handler is not None:
return handler(node)
@@ -848,7 +848,7 @@ def add_body_arcs(
self,
body: Sequence[ast.AST],
from_start: Optional[ArcStart] = None,
- prev_starts: Optional[Set[ArcStart]] = None
+ prev_starts: Optional[Set[ArcStart]] = None,
) -> Set[ArcStart]:
"""Add arcs for the body of a compound statement.
@@ -896,7 +896,7 @@ def find_non_missing_node(self, node: ast.AST) -> Optional[ast.AST]:
missing_fn = cast(
Optional[Callable[[ast.AST], Optional[ast.AST]]],
- getattr(self, "_missing__" + node.__class__.__name__, None)
+ getattr(self, "_missing__" + node.__class__.__name__, None),
)
if missing_fn is not None:
ret_node = missing_fn(node)
@@ -1223,7 +1223,7 @@ def _handle__Try(self, node: ast.Try) -> Set[ArcStart]:
if try_block.raise_from:
self.process_raise_exits(
- self._combine_finally_starts(try_block.raise_from, final_exits)
+ self._combine_finally_starts(try_block.raise_from, final_exits),
)
if try_block.return_from:
@@ -1304,15 +1304,15 @@ def _handle__With(self, node: ast.With) -> Set[ArcStart]:
exits = with_exit
if with_block.break_from:
self.process_break_exits(
- self._combine_finally_starts(with_block.break_from, with_exit)
+ self._combine_finally_starts(with_block.break_from, with_exit),
)
if with_block.continue_from:
self.process_continue_exits(
- self._combine_finally_starts(with_block.continue_from, with_exit)
+ self._combine_finally_starts(with_block.continue_from, with_exit),
)
if with_block.return_from:
self.process_return_exits(
- self._combine_finally_starts(with_block.return_from, with_exit)
+ self._combine_finally_starts(with_block.return_from, with_exit),
)
return exits
diff --git a/coverage/phystokens.py b/coverage/phystokens.py
index 5fd5dacc5..2ee6dd74f 100644
--- a/coverage/phystokens.py
+++ b/coverage/phystokens.py
@@ -68,7 +68,7 @@ def _phys_tokens(toks: TokenInfos) -> TokenInfos:
yield tokenize.TokenInfo(
99999, "\\\n",
(slineno, ccol), (slineno, ccol+2),
- last_line
+ last_line,
)
last_line = ltext
if ttype not in (tokenize.NEWLINE, tokenize.NL):
diff --git a/coverage/plugin_support.py b/coverage/plugin_support.py
index c99fb5e30..d1f2250bc 100644
--- a/coverage/plugin_support.py
+++ b/coverage/plugin_support.py
@@ -58,7 +58,7 @@ def load_plugins(
coverage_init = getattr(mod, "coverage_init", None)
if not coverage_init:
raise PluginError(
- f"Plugin module {module!r} didn't define a coverage_init function"
+ f"Plugin module {module!r} didn't define a coverage_init function",
)
options = config.get_plugin_options(module)
diff --git a/coverage/pytracer.py b/coverage/pytracer.py
index f527a4040..90ad3eb21 100644
--- a/coverage/pytracer.py
+++ b/coverage/pytracer.py
@@ -160,7 +160,7 @@ def _trace(
"Empty stack!",
frame.f_code.co_filename,
frame.f_lineno,
- frame.f_code.co_name
+ frame.f_code.co_name,
)
return None
@@ -190,7 +190,7 @@ def _trace(
self.cur_file_name,
self.last_line,
started_context,
- )
+ ),
)
# Improve tracing performance: when calling a function, both caller
diff --git a/coverage/report.py b/coverage/report.py
index 93cc8fb4d..4ad9a83fa 100644
--- a/coverage/report.py
+++ b/coverage/report.py
@@ -138,7 +138,7 @@ def _report_markdown(
header_items = [formats[item].format(item, name_len=max_name, n=max_n) for item in header]
header_str = "".join(header_items)
rule_str = "|" + " ".join(["- |".rjust(len(header_items[0])-1, "-")] +
- ["-: |".rjust(len(item)-1, "-") for item in header_items[1:]]
+ ["-: |".rjust(len(item)-1, "-") for item in header_items[1:]],
)
# Write the header
@@ -252,7 +252,7 @@ def tabular_report(self) -> None:
if self.config.skip_covered and self.skipped_count:
file_suffix = "s" if self.skipped_count>1 else ""
end_lines.append(
- f"\n{self.skipped_count} file{file_suffix} skipped due to complete coverage."
+ f"\n{self.skipped_count} file{file_suffix} skipped due to complete coverage.",
)
if self.config.skip_empty and self.empty_count:
file_suffix = "s" if self.empty_count > 1 else ""
diff --git a/coverage/sqldata.py b/coverage/sqldata.py
index ad3e1b97d..ca06453f3 100644
--- a/coverage/sqldata.py
+++ b/coverage/sqldata.py
@@ -298,16 +298,16 @@ def _read_db(self) -> None:
else:
raise DataError(
"Data file {!r} doesn't seem to be a coverage data file: {}".format(
- self._filename, exc
- )
+ self._filename, exc,
+ ),
) from exc
else:
schema_version = row[0]
if schema_version != SCHEMA_VERSION:
raise DataError(
"Couldn't use data file {!r}: wrong schema: {} instead of {}".format(
- self._filename, schema_version, SCHEMA_VERSION
- )
+ self._filename, schema_version, SCHEMA_VERSION,
+ ),
)
row = db.execute_one("select value from meta where key = 'has_arcs'")
@@ -396,7 +396,7 @@ def loads(self, data: bytes) -> None:
self._debug.write(f"Loading data into data file {self._filename!r}")
if data[:1] != b"z":
raise DataError(
- f"Unrecognized serialization: {data[:40]!r} (head of {len(data)} bytes)"
+ f"Unrecognized serialization: {data[:40]!r} (head of {len(data)} bytes)",
)
script = zlib.decompress(data[1:]).decode("utf-8")
self._dbs[threading.get_ident()] = db = SqliteDb(self._filename, self._debug)
@@ -416,7 +416,7 @@ def _file_id(self, filename: str, add: bool = False) -> Optional[int]:
with self._connect() as con:
self._file_map[filename] = con.execute_for_rowid(
"insert or replace into file (path) values (?)",
- (filename,)
+ (filename,),
)
return self._file_map.get(filename)
@@ -456,7 +456,7 @@ def _set_context_id(self) -> None:
with self._connect() as con:
self._current_context_id = con.execute_for_rowid(
"insert into context (context) values (?)",
- (context,)
+ (context,),
)
def base_filename(self) -> str:
@@ -486,7 +486,7 @@ def add_lines(self, line_data: Mapping[str, Collection[TLineNo]]) -> None:
"""
if self._debug.should("dataop"):
self._debug.write("Adding lines: %d files, %d lines total" % (
- len(line_data), sum(len(lines) for lines in line_data.values())
+ len(line_data), sum(len(lines) for lines in line_data.values()),
))
if self._debug.should("dataop2"):
for filename, linenos in sorted(line_data.items()):
@@ -524,7 +524,7 @@ def add_arcs(self, arc_data: Mapping[str, Collection[TArc]]) -> None:
"""
if self._debug.should("dataop"):
self._debug.write("Adding arcs: %d files, %d arcs total" % (
- len(arc_data), sum(len(arcs) for arcs in arc_data.values())
+ len(arc_data), sum(len(arcs) for arcs in arc_data.values()),
))
if self._debug.should("dataop2"):
for filename, arcs in sorted(arc_data.items()):
@@ -564,7 +564,7 @@ def _choose_lines_or_arcs(self, lines: bool = False, arcs: bool = False) -> None
with self._connect() as con:
con.execute_void(
"insert or ignore into meta (key, value) values (?, ?)",
- ("has_arcs", str(int(arcs)))
+ ("has_arcs", str(int(arcs))),
)
@_locked
@@ -588,12 +588,12 @@ def add_file_tracers(self, file_tracers: Mapping[str, str]) -> None:
raise DataError(
"Conflicting file tracer name for '{}': {!r} vs {!r}".format(
filename, existing_plugin, plugin_name,
- )
+ ),
)
elif plugin_name:
con.execute_void(
"insert into tracer (file_id, tracer) values (?, ?)",
- (file_id, plugin_name)
+ (file_id, plugin_name),
)
def touch_file(self, filename: str, plugin_name: str = "") -> None:
@@ -685,7 +685,7 @@ def update(self, other_data: CoverageData, aliases: Optional[PathAliases] = None
"select file.path, context.context, arc.fromno, arc.tono " +
"from arc " +
"inner join file on file.id = arc.file_id " +
- "inner join context on context.id = arc.context_id"
+ "inner join context on context.id = arc.context_id",
) as cur:
arcs = [
(files[path], context, fromno, tono)
@@ -697,7 +697,7 @@ def update(self, other_data: CoverageData, aliases: Optional[PathAliases] = None
"select file.path, context.context, line_bits.numbits " +
"from line_bits " +
"inner join file on file.id = line_bits.file_id " +
- "inner join context on context.id = line_bits.context_id"
+ "inner join context on context.id = line_bits.context_id",
) as cur:
lines: Dict[Tuple[str, str], bytes] = {}
for path, context, numbits in cur:
@@ -710,7 +710,7 @@ def update(self, other_data: CoverageData, aliases: Optional[PathAliases] = None
with con.execute(
"select file.path, tracer " +
"from tracer " +
- "inner join file on file.id = tracer.file_id"
+ "inner join file on file.id = tracer.file_id",
) as cur:
tracers = {files[path]: tracer for (path, tracer) in cur}
@@ -726,7 +726,7 @@ def update(self, other_data: CoverageData, aliases: Optional[PathAliases] = None
this_tracers = {path: "" for path, in cur}
with con.execute(
"select file.path, tracer from tracer " +
- "inner join file on file.id = tracer.file_id"
+ "inner join file on file.id = tracer.file_id",
) as cur:
this_tracers.update({
aliases.map(path): tracer
@@ -736,14 +736,14 @@ def update(self, other_data: CoverageData, aliases: Optional[PathAliases] = None
# Create all file and context rows in the DB.
con.executemany_void(
"insert or ignore into file (path) values (?)",
- ((file,) for file in files.values())
+ ((file,) for file in files.values()),
)
with con.execute("select id, path from file") as cur:
file_ids = {path: id for id, path in cur}
self._file_map.update(file_ids)
con.executemany_void(
"insert or ignore into context (context) values (?)",
- ((context,) for context in contexts)
+ ((context,) for context in contexts),
)
with con.execute("select id, context from context") as cur:
context_ids = {context: id for id, context in cur}
@@ -759,8 +759,8 @@ def update(self, other_data: CoverageData, aliases: Optional[PathAliases] = None
if this_tracer is not None and this_tracer != other_tracer:
raise DataError(
"Conflicting file tracer name for '{}': {!r} vs {!r}".format(
- path, this_tracer, other_tracer
- )
+ path, this_tracer, other_tracer,
+ ),
)
tracer_map[path] = other_tracer
@@ -777,7 +777,7 @@ def update(self, other_data: CoverageData, aliases: Optional[PathAliases] = None
"select file.path, context.context, line_bits.numbits " +
"from line_bits " +
"inner join file on file.id = line_bits.file_id " +
- "inner join context on context.id = line_bits.context_id"
+ "inner join context on context.id = line_bits.context_id",
) as cur:
for path, context, numbits in cur:
key = (aliases.map(path), context)
@@ -792,7 +792,7 @@ def update(self, other_data: CoverageData, aliases: Optional[PathAliases] = None
con.executemany_void(
"insert or ignore into arc " +
"(file_id, context_id, fromno, tono) values (?, ?, ?, ?)",
- arc_rows
+ arc_rows,
)
if lines:
@@ -804,11 +804,11 @@ def update(self, other_data: CoverageData, aliases: Optional[PathAliases] = None
[
(file_ids[file], context_ids[context], numbits)
for (file, context), numbits in lines.items()
- ]
+ ],
)
con.executemany_void(
"insert or ignore into tracer (file_id, tracer) values (?, ?)",
- ((file_ids[filename], tracer) for filename, tracer in tracer_map.items())
+ ((file_ids[filename], tracer) for filename, tracer in tracer_map.items()),
)
if not self._no_disk:
diff --git a/coverage/sqlitedb.py b/coverage/sqlitedb.py
index 468436bdd..521431d66 100644
--- a/coverage/sqlitedb.py
+++ b/coverage/sqlitedb.py
@@ -64,7 +64,7 @@ def _connect(self) -> None:
if hasattr(sqlite3, "SQLITE_DBCONFIG_DEFENSIVE"):
# Turn off defensive mode, so that journal_mode=off can succeed.
self.con.setconfig( # type: ignore[attr-defined, unused-ignore]
- sqlite3.SQLITE_DBCONFIG_DEFENSIVE, False
+ sqlite3.SQLITE_DBCONFIG_DEFENSIVE, False,
)
# This pragma makes writing faster. It disables rollbacks, but we never need them.
diff --git a/coverage/sysmon.py b/coverage/sysmon.py
index e6fe28fc3..a2ee87a0b 100644
--- a/coverage/sysmon.py
+++ b/coverage/sysmon.py
@@ -73,7 +73,7 @@ def _wrapped(*args: Any, **kwargs: Any) -> Any:
assert sys_monitoring is not None
short_stack = functools.partial(
- short_stack, full=True, short_filenames=True, frame_ids=True
+ short_stack, full=True, short_filenames=True, frame_ids=True,
)
seen_threads: Set[int] = set()
@@ -359,7 +359,7 @@ def sysmon_py_start(self, code: CodeType, instruction_offset: int) -> MonitorRet
@panopticon("code", "@")
def sysmon_py_resume_arcs(
- self, code: CodeType, instruction_offset: int
+ self, code: CodeType, instruction_offset: int,
) -> MonitorReturn:
"""Handle sys.monitoring.events.PY_RESUME events for branch coverage."""
frame = self.callers_frame()
@@ -367,7 +367,7 @@ def sysmon_py_resume_arcs(
@panopticon("code", "@", None)
def sysmon_py_return_arcs(
- self, code: CodeType, instruction_offset: int, retval: object
+ self, code: CodeType, instruction_offset: int, retval: object,
) -> MonitorReturn:
"""Handle sys.monitoring.events.PY_RETURN events for branch coverage."""
frame = self.callers_frame()
@@ -384,7 +384,7 @@ def sysmon_py_return_arcs(
@panopticon("code", "@", "exc")
def sysmon_py_unwind_arcs(
- self, code: CodeType, instruction_offset: int, exception: BaseException
+ self, code: CodeType, instruction_offset: int, exception: BaseException,
) -> MonitorReturn:
"""Handle sys.monitoring.events.PY_UNWIND events for branch coverage."""
frame = self.callers_frame()
diff --git a/coverage/templite.py b/coverage/templite.py
index 11ea847be..843ea94eb 100644
--- a/coverage/templite.py
+++ b/coverage/templite.py
@@ -196,8 +196,8 @@ def flush_output() -> None:
code.add_line(
"for c_{} in {}:".format(
words[1],
- self._expr_code(words[3])
- )
+ self._expr_code(words[3]),
+ ),
)
code.indent()
elif words[0] == "joined":
@@ -241,7 +241,7 @@ def flush_output() -> None:
self._render_function = cast(
Callable[
[Dict[str, Any], Callable[..., Any]],
- str
+ str,
],
code.get_globals()["render_function"],
)
@@ -302,7 +302,7 @@ def _do_dots(self, value: Any, *dots: str) -> Any:
value = value[dot]
except (TypeError, KeyError) as exc:
raise TempliteValueError(
- f"Couldn't evaluate {value!r}.{dot}"
+ f"Couldn't evaluate {value!r}.{dot}",
) from exc
if callable(value):
value = value()
diff --git a/coverage/tomlconfig.py b/coverage/tomlconfig.py
index 139cb2c1b..bc4cfc337 100644
--- a/coverage/tomlconfig.py
+++ b/coverage/tomlconfig.py
@@ -165,10 +165,10 @@ def _check_type(
return converter(value)
except Exception as e:
raise ValueError(
- f"Option [{section}]{option} couldn't convert to {type_desc}: {value!r}"
+ f"Option [{section}]{option} couldn't convert to {type_desc}: {value!r}",
) from e
raise ValueError(
- f"Option [{section}]{option} is not {type_desc}: {value!r}"
+ f"Option [{section}]{option} is not {type_desc}: {value!r}",
)
def getboolean(self, section: str, option: str) -> bool:
diff --git a/coverage/types.py b/coverage/types.py
index b39798573..60023143a 100644
--- a/coverage/types.py
+++ b/coverage/types.py
@@ -43,7 +43,7 @@ def __call__(
frame: FrameType,
event: str,
arg: Any,
- lineno: Optional[TLineNo] = None # Our own twist, see collector.py
+ lineno: Optional[TLineNo] = None, # Our own twist, see collector.py
) -> Optional[TTraceFn]:
...
diff --git a/coverage/xmlreport.py b/coverage/xmlreport.py
index 819b4c6bc..ae4393557 100644
--- a/coverage/xmlreport.py
+++ b/coverage/xmlreport.py
@@ -97,7 +97,7 @@ def report(self, morfs: Optional[Iterable[TMorf]], outfile: Optional[IO[str]] =
xcoverage.setAttribute("version", __version__)
xcoverage.setAttribute("timestamp", str(int(time.time()*1000)))
xcoverage.appendChild(self.xml_out.createComment(
- f" Generated by coverage.py: {__url__} "
+ f" Generated by coverage.py: {__url__} ",
))
xcoverage.appendChild(self.xml_out.createComment(f" Based on {DTD_URL} "))
@@ -222,7 +222,7 @@ def xml_file(self, fr: FileReporter, analysis: Analysis, has_arcs: bool) -> None
xline.setAttribute("branch", "true")
xline.setAttribute(
"condition-coverage",
- "%d%% (%d/%d)" % (100*taken//total, taken, total)
+ "%d%% (%d/%d)" % (100*taken//total, taken, total),
)
if line in missing_branch_arcs:
annlines = ["exit" if b < 0 else str(b) for b in missing_branch_arcs[line]]
diff --git a/igor.py b/igor.py
index 5aa185f42..56b1a01fa 100644
--- a/igor.py
+++ b/igor.py
@@ -82,7 +82,7 @@ def do_remove_extension(*args):
"import coverage; print(coverage.__file__)",
],
encoding="utf-8",
- ).strip()
+ ).strip(),
)
roots = [root]
else:
@@ -245,7 +245,7 @@ def do_combine_html():
cov = coverage.Coverage(config_file="metacov.ini", messages=True)
cov.load()
show_contexts = bool(
- os.getenv("COVERAGE_DYNCTX") or os.getenv("COVERAGE_CONTEXT")
+ os.getenv("COVERAGE_DYNCTX") or os.getenv("COVERAGE_CONTEXT"),
)
cov.html_report(show_contexts=show_contexts)
@@ -280,7 +280,7 @@ def do_zip_mods():
assert [ord(c) for c in text] == ords
print(u"All OK with {encoding}")
encoding = "{encoding}"
- """
+ """,
)
# These encodings should match the list in tests/test_python.py
details = [
@@ -333,7 +333,7 @@ def print_banner(label):
def do_quietly(command):
"""Run a command in a shell, and suppress all output."""
proc = subprocess.run(
- command, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL
+ command, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL,
)
return proc.returncode
@@ -388,7 +388,7 @@ def do_edit_for_release():
# NOTICE.txt
update_file(
- "NOTICE.txt", r"Copyright 2004.*? Ned", f"Copyright 2004-{facts.now:%Y} Ned"
+ "NOTICE.txt", r"Copyright 2004.*? Ned", f"Copyright 2004-{facts.now:%Y} Ned",
)
# CHANGES.rst
@@ -411,7 +411,7 @@ def do_edit_for_release():
# The date of release, in "monthname day, year" format.
release_date = "{facts.now:%B %-d, %Y}"
# @@@ end
- """
+ """,
)
update_file("doc/conf.py", r"(?s)# @@@ editable\n.*# @@@ end\n", new_conf)
@@ -430,7 +430,7 @@ def do_bump_version():
# coverage/version.py
next_version = f"version_info = {facts.next_vi}\n_dev = 1".replace("'", '"')
update_file(
- "coverage/version.py", r"(?m)^version_info = .*\n_dev = \d+$", next_version
+ "coverage/version.py", r"(?m)^version_info = .*\n_dev = \d+$", next_version,
)
@@ -445,13 +445,13 @@ def do_cheats():
github = f"https://github.com/{repo}"
egg = "egg=coverage==0.0" # to force a re-install
print(
- f"https://coverage.readthedocs.io/en/{facts.ver}/changes.html#changes-{facts.anchor}"
+ f"https://coverage.readthedocs.io/en/{facts.ver}/changes.html#changes-{facts.anchor}",
)
print(
"\n## For GitHub commenting:\n"
+ "This is now released as part of "
- + f"[coverage {facts.ver}](https://pypi.org/project/coverage/{facts.ver})."
+ + f"[coverage {facts.ver}](https://pypi.org/project/coverage/{facts.ver}).",
)
print("\n## To run this code:")
@@ -465,7 +465,7 @@ def do_cheats():
"\n## For other collaborators:\n"
+ f"git clone {github}\n"
+ f"cd {repo.partition('/')[-1]}\n"
- + f"git checkout {facts.sha}"
+ + f"git checkout {facts.sha}",
)
diff --git a/setup.py b/setup.py
index da105fcd9..cc5f7ed1a 100644
--- a/setup.py
+++ b/setup.py
@@ -86,7 +86,7 @@
"coverage": [
"htmlfiles/*.*",
"py.typed",
- ]
+ ],
},
entry_points={
# Install a script as "coverage", and as "coverage3", and as
@@ -199,7 +199,7 @@ def build_extension(self, ext):
cmdclass={
"build_ext": ve_build_ext,
},
- )
+ ),
)
diff --git a/tests/coveragetest.py b/tests/coveragetest.py
index c93dcb8dd..5576bf016 100644
--- a/tests/coveragetest.py
+++ b/tests/coveragetest.py
@@ -76,7 +76,7 @@ def start_import_stop(
self,
cov: Coverage,
modname: str,
- modfile: Optional[str] = None
+ modfile: Optional[str] = None,
) -> ModuleType:
"""Start coverage, import a file, then stop coverage.
diff --git a/tests/modules/process_test/try_execfile.py b/tests/modules/process_test/try_execfile.py
index 47ffb78b4..7b4176215 100644
--- a/tests/modules/process_test/try_execfile.py
+++ b/tests/modules/process_test/try_execfile.py
@@ -103,7 +103,7 @@ def word_group(w: str) -> int:
if loader is not None:
globals_to_check.update({
- '__loader__.fullname': getattr(loader, 'fullname', None) or getattr(loader, 'name', None)
+ '__loader__.fullname': getattr(loader, 'fullname', None) or getattr(loader, 'name', None),
})
if spec is not None:
diff --git a/tests/plugin1.py b/tests/plugin1.py
index 4848eaff5..afaa17222 100644
--- a/tests/plugin1.py
+++ b/tests/plugin1.py
@@ -35,7 +35,7 @@ def __init__(self, filename: str) -> None:
self._filename = filename
self._source_filename = os.path.join(
"/src",
- os.path.basename(filename.replace("xyz.py", "ABC.zz"))
+ os.path.basename(filename.replace("xyz.py", "ABC.zz")),
)
def source_filename(self) -> str:
diff --git a/tests/test_api.py b/tests/test_api.py
index 16dde146e..7e291b21d 100644
--- a/tests/test_api.py
+++ b/tests/test_api.py
@@ -1349,7 +1349,7 @@ def test_combine_parallel_data_with_a_corrupt_file(self) -> None:
assert_coverage_warnings(
warns,
re.compile(
- r"Couldn't use data file '.*[/\\]\.coverage\.bad': " + BAD_SQLITE_REGEX
+ r"Couldn't use data file '.*[/\\]\.coverage\.bad': " + BAD_SQLITE_REGEX,
),
)
@@ -1380,7 +1380,7 @@ def test_combine_no_usable_files(self) -> None:
cov.combine(strict=True)
warn_rx = re.compile(
- r"Couldn't use data file '.*[/\\]\.coverage\.bad[12]': " + BAD_SQLITE_REGEX
+ r"Couldn't use data file '.*[/\\]\.coverage\.bad[12]': " + BAD_SQLITE_REGEX,
)
assert_coverage_warnings(warns, warn_rx, warn_rx)
diff --git a/tests/test_arcs.py b/tests/test_arcs.py
index 615dd557a..1c1437fbe 100644
--- a/tests/test_arcs.py
+++ b/tests/test_arcs.py
@@ -32,14 +32,14 @@ def test_simple_sequence(self) -> None:
a = 1
b = 2
""",
- arcz=".1 12 2."
+ arcz=".1 12 2.",
)
self.check_coverage("""\
a = 1
b = 3
""",
- arcz=".1 13 3."
+ arcz=".1 13 3.",
)
line1 = 1 if env.PYBEHAVIOR.module_firstline_1 else 2
self.check_coverage("""\
@@ -49,7 +49,7 @@ def test_simple_sequence(self) -> None:
c = 5
""",
- arcz="-{0}2 23 35 5-{0}".format(line1)
+ arcz="-{0}2 23 35 5-{0}".format(line1),
)
def test_function_def(self) -> None:
@@ -59,7 +59,7 @@ def foo():
foo()
""",
- arcz=".1 .2 14 2. 4."
+ arcz=".1 .2 14 2. 4.",
)
def test_if(self) -> None:
@@ -69,7 +69,7 @@ def test_if(self) -> None:
a = 3
assert a == 3
""",
- arcz=".1 12 23 24 34 4.", arcz_missing="24"
+ arcz=".1 12 23 24 34 4.", arcz_missing="24",
)
self.check_coverage("""\
a = 1
@@ -77,7 +77,7 @@ def test_if(self) -> None:
a = 3
assert a == 1
""",
- arcz=".1 12 23 24 34 4.", arcz_missing="23 34"
+ arcz=".1 12 23 24 34 4.", arcz_missing="23 34",
)
def test_if_else(self) -> None:
@@ -88,7 +88,7 @@ def test_if_else(self) -> None:
a = 4
assert a == 2
""",
- arcz=".1 12 25 14 45 5.", arcz_missing="14 45"
+ arcz=".1 12 25 14 45 5.", arcz_missing="14 45",
)
self.check_coverage("""\
if len([]) == 1:
@@ -97,7 +97,7 @@ def test_if_else(self) -> None:
a = 4
assert a == 4
""",
- arcz=".1 12 25 14 45 5.", arcz_missing="12 25"
+ arcz=".1 12 25 14 45 5.", arcz_missing="12 25",
)
def test_compact_if(self) -> None:
@@ -115,7 +115,7 @@ def fn(x):
a = fn(1)
assert a is True
""",
- arcz=".1 14 45 5. .2 2. 23 3.", arcz_missing="23 3."
+ arcz=".1 14 45 5. .2 2. 23 3.", arcz_missing="23 3.",
)
def test_multiline(self) -> None:
@@ -153,7 +153,7 @@ def foo():
return a
assert foo() == 3 # 7
""",
- arcz=".1 17 7. .2 23 36 25 56 6.", arcz_missing="25 56"
+ arcz=".1 17 7. .2 23 36 25 56 6.", arcz_missing="25 56",
)
self.check_coverage("""\
def foo():
@@ -163,7 +163,7 @@ def foo():
a = 5
foo() # 6
""",
- arcz=".1 16 6. .2 23 3. 25 5.", arcz_missing="25 5."
+ arcz=".1 16 6. .2 23 3. 25 5.", arcz_missing="25 5.",
)
def test_what_is_the_sound_of_no_lines_clapping(self) -> None:
@@ -363,7 +363,7 @@ def test_loop(self) -> None:
a = i
assert a == -1
""",
- arcz=".1 12 23 32 24 4.", arcz_missing="23 32"
+ arcz=".1 12 23 32 24 4.", arcz_missing="23 32",
)
def test_nested_loop(self) -> None:
@@ -391,7 +391,7 @@ def test_break(self) -> None:
a = 99
assert a == 0 # 5
""",
- arcz=arcz, arcz_missing=arcz_missing
+ arcz=arcz, arcz_missing=arcz_missing,
)
def test_continue(self) -> None:
@@ -409,7 +409,7 @@ def test_continue(self) -> None:
a = 99
assert a == 9 # 5
""",
- arcz=arcz, arcz_missing=arcz_missing
+ arcz=arcz, arcz_missing=arcz_missing,
)
def test_nested_breaks(self) -> None:
@@ -422,7 +422,7 @@ def test_nested_breaks(self) -> None:
break
assert a == 2 and i == 2 # 7
""",
- arcz=".1 12 23 34 45 25 56 51 67 17 7.", arcz_missing="17 25"
+ arcz=".1 12 23 34 45 25 56 51 67 17 7.", arcz_missing="17 25",
)
def test_while_1(self) -> None:
@@ -498,7 +498,7 @@ def test_bug_496_continue_in_constant_while(self) -> None:
i = "line 6"
break
""",
- arcz=arcz
+ arcz=arcz,
)
def test_for_if_else_for(self) -> None:
@@ -525,7 +525,7 @@ def branches_3(l):
".1 18 8G GH H. " +
".2 23 34 43 26 3. 6. " +
"-89 9A 9-8 AB BC CB B9 AE E9",
- arcz_missing="26 6."
+ arcz_missing="26 6.",
)
def test_for_else(self) -> None:
@@ -540,7 +540,7 @@ def forelse(seq):
forelse([1,2])
forelse([1,6])
""",
- arcz=".1 .2 23 32 34 47 26 67 7. 18 89 9."
+ arcz=".1 .2 23 32 34 47 26 67 7. 18 89 9.",
)
def test_while_else(self) -> None:
@@ -724,7 +724,7 @@ def test_try_except(self) -> None:
b = 5
assert a == 3 and b == 1
""",
- arcz=".1 12 23 36 45 56 6.", arcz_missing="45 56"
+ arcz=".1 12 23 36 45 56 6.", arcz_missing="45 56",
)
def test_raise_followed_by_statement(self) -> None:
@@ -986,7 +986,7 @@ def test_except_finally(self) -> None:
c = 7
assert a == 3 and b == 1 and c == 7
""",
- arcz=".1 12 23 45 37 57 78 8.", arcz_missing="45 57"
+ arcz=".1 12 23 45 37 57 78 8.", arcz_missing="45 57",
)
self.check_coverage("""\
a, b, c = 1, 1, 1
@@ -1003,7 +1003,7 @@ def oops(x):
assert a == 5 and b == 9 and c == 11
""",
arcz=".1 12 -23 3-2 24 45 56 67 7B 89 9B BC C.",
- arcz_missing="67 7B", arcz_unpredicted="68"
+ arcz_missing="67 7B", arcz_unpredicted="68",
)
def test_multiple_except_clauses(self) -> None:
@@ -1516,13 +1516,13 @@ def test_dunder_debug(self) -> None:
# Check that executed code has __debug__
self.check_coverage("""\
assert __debug__, "assert __debug__"
- """
+ """,
)
# Check that if it didn't have debug, it would let us know.
with pytest.raises(AssertionError):
self.check_coverage("""\
assert not __debug__, "assert not __debug__"
- """
+ """,
)
def test_if_debug(self) -> None:
@@ -1612,7 +1612,7 @@ def test_unpacked_literals(self) -> None:
}
assert weird['b'] == 3
""",
- arcz=".1 15 5A A."
+ arcz=".1 15 5A A.",
)
self.check_coverage("""\
l = [
@@ -1626,7 +1626,7 @@ def test_unpacked_literals(self) -> None:
]
assert weird[1] == 3
""",
- arcz=".1 15 5A A."
+ arcz=".1 15 5A A.",
)
@pytest.mark.parametrize("n", [10, 50, 100, 500, 1000, 2000, 10000])
diff --git a/tests/test_cmdline.py b/tests/test_cmdline.py
index d27f6a4aa..f24ddf88c 100644
--- a/tests/test_cmdline.py
+++ b/tests/test_cmdline.py
@@ -875,7 +875,7 @@ def test_run_dashm_only(self) -> None:
show_help('No module specified for -m')
""",
ret=ERR,
- options={"run:command_line": "myprog.py"}
+ options={"run:command_line": "myprog.py"},
)
def test_cant_append_parallel(self) -> None:
diff --git a/tests/test_concurrency.py b/tests/test_concurrency.py
index 2bec5fb87..ae54f66df 100644
--- a/tests/test_concurrency.py
+++ b/tests/test_concurrency.py
@@ -492,7 +492,7 @@ def try_multiprocessing_code(
assert all(
re.fullmatch(
r"(Combined data file|Skipping duplicate data) \.coverage\..*\.\d+\.X\w{6}x",
- line
+ line,
)
for line in out_lines
)
@@ -732,7 +732,7 @@ def subproc(x):
[run]
parallel = True
concurrency = multiprocessing
- """ + ("sigterm = true" if sigterm else "")
+ """ + ("sigterm = true" if sigterm else ""),
)
out = self.run_command("coverage run clobbered.py")
# Under Linux, things go wrong. Does that matter?
diff --git a/tests/test_config.py b/tests/test_config.py
index 6823d4337..e2ab90e88 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -602,7 +602,7 @@ def assert_config_settings_are_correct(self, cov: Coverage) -> None:
assert cov.config.paths == {
'source': ['.', '/home/ned/src/'],
- 'other': ['other', '/home/ned/other', 'c:\\Ned\\etc']
+ 'other': ['other', '/home/ned/other', 'c:\\Ned\\etc'],
}
assert cov.config.get_plugin_options("plugins.a_plugin") == {
diff --git a/tests/test_context.py b/tests/test_context.py
index 3c0fadb7d..d3803ace3 100644
--- a/tests/test_context.py
+++ b/tests/test_context.py
@@ -169,7 +169,7 @@ def test_dynamic_alone(self) -> None:
fname = full_names["two_tests.py"]
assert_count_equal(
data.measured_contexts(),
- ["", "two_tests.test_one", "two_tests.test_two"]
+ ["", "two_tests.test_one", "two_tests.test_two"],
)
def assert_context_lines(context: str, lines: List[TLineNo]) -> None:
@@ -191,7 +191,7 @@ def test_static_and_dynamic(self) -> None:
fname = full_names["two_tests.py"]
assert_count_equal(
data.measured_contexts(),
- ["stat", "stat|two_tests.test_one", "stat|two_tests.test_two"]
+ ["stat", "stat|two_tests.test_one", "stat|two_tests.test_two"],
)
def assert_context_lines(context: str, lines: List[TLineNo]) -> None:
diff --git a/tests/test_coverage.py b/tests/test_coverage.py
index 96639c072..56b9a35c4 100644
--- a/tests/test_coverage.py
+++ b/tests/test_coverage.py
@@ -23,7 +23,7 @@ def test_successful_coverage(self) -> None:
a = 1
b = 2
""",
- [1,2]
+ [1,2],
)
# You can provide a list of possible statement matches.
self.check_coverage("""\
@@ -48,7 +48,7 @@ def test_successful_coverage(self) -> None:
a = 3
""",
[1,2,3],
- missing=("47-49", "3", "100,102")
+ missing=("47-49", "3", "100,102"),
)
def test_failed_coverage(self) -> None:
@@ -58,7 +58,7 @@ def test_failed_coverage(self) -> None:
a = 1
b = 2
""",
- [1]
+ [1],
)
# If the list of lines possibilities is wrong, the msg shows right.
msg = r"None of the lines choices matched \[1, 2]"
@@ -67,7 +67,7 @@ def test_failed_coverage(self) -> None:
a = 1
b = 2
""",
- ([1], [2])
+ ([1], [2]),
)
# If the missing lines are wrong, the message shows right and wrong.
with pytest.raises(AssertionError, match=r"'3' != '37'"):
@@ -97,7 +97,7 @@ def test_exceptions_really_fail(self) -> None:
self.check_coverage("""\
a = 1
assert a == 99, "This is bad"
- """
+ """,
)
# Other exceptions too.
with pytest.raises(ZeroDivisionError, match="division"):
@@ -105,7 +105,7 @@ def test_exceptions_really_fail(self) -> None:
a = 1
assert a == 1, "This is good"
a/0
- """
+ """,
)
@@ -121,7 +121,7 @@ def test_simple(self) -> None:
# Nothing here
d = 6
""",
- [1,2,4,6], report="4 0 0 0 100%"
+ [1,2,4,6], report="4 0 0 0 100%",
)
def test_indentation_wackiness(self) -> None:
@@ -131,7 +131,7 @@ def test_indentation_wackiness(self) -> None:
if not sys.path:
a = 1
""", # indented last line
- [1,2,3], "3"
+ [1,2,3], "3",
)
def test_multiline_initializer(self) -> None:
@@ -144,7 +144,7 @@ def test_multiline_initializer(self) -> None:
e = { 'foo': 1, 'bar': 2 }
""",
- [1,7], ""
+ [1,7], "",
)
def test_list_comprehension(self) -> None:
@@ -155,7 +155,7 @@ def test_list_comprehension(self) -> None:
]
assert l == [12, 14, 16, 18]
""",
- [1,5], ""
+ [1,5], "",
)
@@ -170,21 +170,21 @@ def test_expression(self) -> None:
12
23
""",
- ([1,2],[2]), ""
+ ([1,2],[2]), "",
)
self.check_coverage("""\
12
23
a = 3
""",
- ([1,2,3],[3]), ""
+ ([1,2,3],[3]), "",
)
self.check_coverage("""\
1 + 2
1 + \\
2
""",
- ([1,2], [2]), ""
+ ([1,2], [2]), "",
)
self.check_coverage("""\
1 + 2
@@ -192,7 +192,7 @@ def test_expression(self) -> None:
2
a = 4
""",
- ([1,2,4], [4]), ""
+ ([1,2,4], [4]), "",
)
def test_assert(self) -> None:
@@ -205,7 +205,7 @@ def test_assert(self) -> None:
2), \\
'something is amiss'
""",
- [1,2,4,5], ""
+ [1,2,4,5], "",
)
def test_assignment(self) -> None:
@@ -217,7 +217,7 @@ def test_assignment(self) -> None:
c = \\
1
""",
- [1,2,4], ""
+ [1,2,4], "",
)
def test_assign_tuple(self) -> None:
@@ -226,7 +226,7 @@ def test_assign_tuple(self) -> None:
a,b,c = 7,8,9
assert a == 7 and b == 8 and c == 9
""",
- [1,2,3], ""
+ [1,2,3], "",
)
def test_more_assignments(self) -> None:
@@ -242,7 +242,7 @@ def test_more_assignments(self) -> None:
] = \\
9
""",
- [1, 2, 3], ""
+ [1, 2, 3], "",
)
def test_attribute_assignment(self) -> None:
@@ -256,7 +256,7 @@ class obj: pass
o.foo = \\
1
""",
- [1,2,3,4,6], ""
+ [1,2,3,4,6], "",
)
def test_list_of_attribute_assignment(self) -> None:
@@ -271,7 +271,7 @@ class obj: pass
1, \\
2
""",
- [1,2,3,4,7], ""
+ [1,2,3,4,7], "",
)
def test_augmented_assignment(self) -> None:
@@ -283,7 +283,7 @@ def test_augmented_assignment(self) -> None:
a += \\
1
""",
- [1,2,3,5], ""
+ [1,2,3,5], "",
)
def test_triple_string_stuff(self) -> None:
@@ -306,7 +306,7 @@ def test_triple_string_stuff(self) -> None:
lines.
''')
""",
- [1,5,11], ""
+ [1,5,11], "",
)
def test_pass(self) -> None:
@@ -316,14 +316,14 @@ def test_pass(self) -> None:
if 1==1:
pass
""",
- [1,2], ""
+ [1,2], "",
)
self.check_coverage("""\
def foo():
pass
foo()
""",
- [1,2,3], ""
+ [1,2,3], "",
)
self.check_coverage("""\
def foo():
@@ -331,7 +331,7 @@ def foo():
pass
foo()
""",
- ([1,3,4], [1,4]), ""
+ ([1,3,4], [1,4]), "",
)
self.check_coverage("""\
class Foo:
@@ -339,7 +339,7 @@ def foo(self):
pass
Foo().foo()
""",
- [1,2,3,4], ""
+ [1,2,3,4], "",
)
self.check_coverage("""\
class Foo:
@@ -348,7 +348,7 @@ def foo(self):
pass
Foo().foo()
""",
- ([1,2,4,5], [1,2,5]), ""
+ ([1,2,4,5], [1,2,5]), "",
)
def test_del(self) -> None:
@@ -363,7 +363,7 @@ def test_del(self) -> None:
d['e']
assert(len(d.keys()) == 0)
""",
- [1,2,3,6,9], ""
+ [1,2,3,6,9], "",
)
def test_raise(self) -> None:
@@ -375,7 +375,7 @@ def test_raise(self) -> None:
except:
pass
""",
- [1,2,5,6], ""
+ [1,2,5,6], "",
)
def test_raise_followed_by_statement(self) -> None:
@@ -392,7 +392,7 @@ def test_raise_followed_by_statement(self) -> None:
except:
pass
""",
- lines=lines, missing=missing
+ lines=lines, missing=missing,
)
def test_return(self) -> None:
@@ -404,7 +404,7 @@ def fn():
x = fn()
assert(x == 1)
""",
- [1,2,3,5,6], ""
+ [1,2,3,5,6], "",
)
self.check_coverage("""\
def fn():
@@ -416,7 +416,7 @@ def fn():
x = fn()
assert(x == 2)
""",
- [1,2,3,7,8], ""
+ [1,2,3,7,8], "",
)
self.check_coverage("""\
def fn():
@@ -428,7 +428,7 @@ def fn():
x,y,z = fn()
assert x == 1 and y == 2 and z == 3
""",
- [1,2,3,7,8], ""
+ [1,2,3,7,8], "",
)
def test_return_followed_by_statement(self) -> None:
@@ -462,7 +462,7 @@ def gen():
a,b,c = gen()
assert a == 1 and b == 9 and c == (1,2)
""",
- [1,2,3,6,8,9], ""
+ [1,2,3,6,8,9], "",
)
def test_break(self) -> None:
@@ -480,7 +480,7 @@ def test_break(self) -> None:
a = 4
assert a == 2
""",
- lines=lines, missing=missing
+ lines=lines, missing=missing,
)
def test_continue(self) -> None:
@@ -498,7 +498,7 @@ def test_continue(self) -> None:
a = 4
assert a == 11
""",
- lines=lines, missing=missing
+ lines=lines, missing=missing,
)
def test_strange_unexecuted_continue(self) -> None:
@@ -538,7 +538,7 @@ def test_import(self) -> None:
from sys import path
a = 1
""",
- [1,2,3], ""
+ [1,2,3], "",
)
self.check_coverage("""\
import string
@@ -546,7 +546,7 @@ def test_import(self) -> None:
from sys import path
a = 1
""",
- [1,2,3,4], "3"
+ [1,2,3,4], "3",
)
self.check_coverage("""\
import string, \\
@@ -556,34 +556,34 @@ def test_import(self) -> None:
stdout
a = 1
""",
- [1,4,6], ""
+ [1,4,6], "",
)
self.check_coverage("""\
import sys, sys as s
assert s.path == sys.path
""",
- [1,2], ""
+ [1,2], "",
)
self.check_coverage("""\
import sys, \\
sys as s
assert s.path == sys.path
""",
- [1,3], ""
+ [1,3], "",
)
self.check_coverage("""\
from sys import path, \\
path as p
assert p == path
""",
- [1,3], ""
+ [1,3], "",
)
self.check_coverage("""\
from sys import \\
*
assert len(path) > 0
""",
- [1,3], ""
+ [1,3], "",
)
def test_global(self) -> None:
@@ -597,7 +597,7 @@ def fn():
fn()
assert g == 2 and h == 2 and i == 2
""",
- [1,2,6,7,8], ""
+ [1,2,6,7,8], "",
)
self.check_coverage("""\
g = h = i = 1
@@ -606,7 +606,7 @@ def fn():
fn()
assert g == 2 and h == 1 and i == 1
""",
- [1,2,3,4,5], ""
+ [1,2,3,4,5], "",
)
def test_exec(self) -> None:
@@ -618,7 +618,7 @@ def test_exec(self) -> None:
"2")
assert a == 2 and b == 2 and c == 2
""",
- [1,2,3,6], ""
+ [1,2,3,6], "",
)
self.check_coverage("""\
vars = {'a': 1, 'b': 1, 'c': 1}
@@ -628,7 +628,7 @@ def test_exec(self) -> None:
"2", vars)
assert vars['a'] == 2 and vars['b'] == 2 and vars['c'] == 2
""",
- [1,2,3,6], ""
+ [1,2,3,6], "",
)
self.check_coverage("""\
globs = {}
@@ -639,7 +639,7 @@ def test_exec(self) -> None:
"2", globs, locs)
assert locs['a'] == 2 and locs['b'] == 2 and locs['c'] == 2
""",
- [1,2,3,4,7], ""
+ [1,2,3,4,7], "",
)
def test_extra_doc_string(self) -> None:
@@ -671,7 +671,7 @@ def test_nonascii(self) -> None:
a = 2
b = 3
""",
- [2, 3]
+ [2, 3],
)
def test_module_docstring(self) -> None:
@@ -680,7 +680,7 @@ def test_module_docstring(self) -> None:
a = 2
b = 3
""",
- [2, 3]
+ [2, 3],
)
lines = [2, 3, 4]
self.check_coverage("""\
@@ -689,7 +689,7 @@ def test_module_docstring(self) -> None:
a = 3
b = 4
""",
- lines
+ lines,
)
@@ -704,7 +704,7 @@ def test_statement_list(self) -> None:
assert (a,b,c,d,e) == (1,2,3,4,5)
""",
- [1,2,3,5], ""
+ [1,2,3,5], "",
)
def test_if(self) -> None:
@@ -718,7 +718,7 @@ def test_if(self) -> None:
x = 7
assert x == 7
""",
- [1,2,3,4,5,7,8], ""
+ [1,2,3,4,5,7,8], "",
)
self.check_coverage("""\
a = 1
@@ -728,7 +728,7 @@ def test_if(self) -> None:
y = 5
assert x == 3
""",
- [1,2,3,5,6], "5"
+ [1,2,3,5,6], "5",
)
self.check_coverage("""\
a = 1
@@ -738,7 +738,7 @@ def test_if(self) -> None:
y = 5
assert y == 5
""",
- [1,2,3,5,6], "3"
+ [1,2,3,5,6], "3",
)
self.check_coverage("""\
a = 1; b = 2
@@ -751,7 +751,7 @@ def test_if(self) -> None:
z = 8
assert x == 4
""",
- [1,2,3,4,6,8,9], "6-8"
+ [1,2,3,4,6,8,9], "6-8",
)
def test_elif(self) -> None:
@@ -830,7 +830,7 @@ def f(self):
else:
x = 13
""",
- [1,2,3,4,5,6,7,8,9,10,11,13], "2-13"
+ [1,2,3,4,5,6,7,8,9,10,11,13], "2-13",
)
def test_split_if(self) -> None:
@@ -846,7 +846,7 @@ def test_split_if(self) -> None:
z = 7
assert x == 3
""",
- [1,2,4,5,7,9,10], "5-9"
+ [1,2,4,5,7,9,10], "5-9",
)
self.check_coverage("""\
a = 1; b = 2; c = 3;
@@ -860,7 +860,7 @@ def test_split_if(self) -> None:
z = 7
assert y == 5
""",
- [1,2,4,5,7,9,10], "4, 9"
+ [1,2,4,5,7,9,10], "4, 9",
)
self.check_coverage("""\
a = 1; b = 2; c = 3;
@@ -874,7 +874,7 @@ def test_split_if(self) -> None:
z = 7
assert z == 7
""",
- [1,2,4,5,7,9,10], "4, 7"
+ [1,2,4,5,7,9,10], "4, 7",
)
def test_pathological_split_if(self) -> None:
@@ -892,7 +892,7 @@ def test_pathological_split_if(self) -> None:
z = 7
assert x == 3
""",
- [1,2,5,6,9,11,12], "6-11"
+ [1,2,5,6,9,11,12], "6-11",
)
self.check_coverage("""\
a = 1; b = 2; c = 3;
@@ -908,7 +908,7 @@ def test_pathological_split_if(self) -> None:
z = 7
assert y == 5
""",
- [1,2,5,6,9,11,12], "5, 11"
+ [1,2,5,6,9,11,12], "5, 11",
)
self.check_coverage("""\
a = 1; b = 2; c = 3;
@@ -924,7 +924,7 @@ def test_pathological_split_if(self) -> None:
z = 7
assert z == 7
""",
- [1,2,5,6,9,11,12], "5, 9"
+ [1,2,5,6,9,11,12], "5, 9",
)
def test_absurd_split_if(self) -> None:
@@ -940,7 +940,7 @@ def test_absurd_split_if(self) -> None:
z = 7
assert x == 3
""",
- [1,2,4,5,7,9,10], "5-9"
+ [1,2,4,5,7,9,10], "5-9",
)
self.check_coverage("""\
a = 1; b = 2; c = 3;
@@ -954,7 +954,7 @@ def test_absurd_split_if(self) -> None:
z = 7
assert y == 5
""",
- [1,2,4,5,7,9,10], "4, 9"
+ [1,2,4,5,7,9,10], "4, 9",
)
self.check_coverage("""\
a = 1; b = 2; c = 3;
@@ -968,7 +968,7 @@ def test_absurd_split_if(self) -> None:
z = 7
assert z == 7
""",
- [1,2,4,5,7,9,10], "4, 7"
+ [1,2,4,5,7,9,10], "4, 7",
)
def test_constant_if(self) -> None:
@@ -993,7 +993,7 @@ def test_while(self) -> None:
a -= 1
assert a == 0 and b == 3
""",
- [1,2,3,4,5], ""
+ [1,2,3,4,5], "",
)
self.check_coverage("""\
a = 3; b = 0
@@ -1002,7 +1002,7 @@ def test_while(self) -> None:
break
assert a == 3 and b == 1
""",
- [1,2,3,4,5], ""
+ [1,2,3,4,5], "",
)
def test_while_else(self) -> None:
@@ -1016,7 +1016,7 @@ def test_while_else(self) -> None:
b = 99
assert a == 0 and b == 99
""",
- [1,2,3,4,6,7], ""
+ [1,2,3,4,6,7], "",
)
# Don't take the else branch.
self.check_coverage("""\
@@ -1029,7 +1029,7 @@ def test_while_else(self) -> None:
b = 99
assert a == 2 and b == 1
""",
- [1,2,3,4,5,7,8], "7"
+ [1,2,3,4,5,7,8], "7",
)
def test_split_while(self) -> None:
@@ -1041,7 +1041,7 @@ def test_split_while(self) -> None:
a -= 1
assert a == 0 and b == 3
""",
- [1,2,4,5,6], ""
+ [1,2,4,5,6], "",
)
self.check_coverage("""\
a = 3; b = 0
@@ -1052,7 +1052,7 @@ def test_split_while(self) -> None:
a -= 1
assert a == 0 and b == 3
""",
- [1,2,5,6,7], ""
+ [1,2,5,6,7], "",
)
def test_for(self) -> None:
@@ -1062,7 +1062,7 @@ def test_for(self) -> None:
a += i
assert a == 15
""",
- [1,2,3,4], ""
+ [1,2,3,4], "",
)
self.check_coverage("""\
a = 0
@@ -1072,7 +1072,7 @@ def test_for(self) -> None:
a += i
assert a == 15
""",
- [1,2,5,6], ""
+ [1,2,5,6], "",
)
self.check_coverage("""\
a = 0
@@ -1081,7 +1081,7 @@ def test_for(self) -> None:
break
assert a == 1
""",
- [1,2,3,4,5], ""
+ [1,2,3,4,5], "",
)
def test_for_else(self) -> None:
@@ -1093,7 +1093,7 @@ def test_for_else(self) -> None:
a = 99
assert a == 99
""",
- [1,2,3,5,6], ""
+ [1,2,3,5,6], "",
)
self.check_coverage("""\
a = 0
@@ -1104,7 +1104,7 @@ def test_for_else(self) -> None:
a = 123
assert a == 1
""",
- [1,2,3,4,6,7], "6"
+ [1,2,3,4,6,7], "6",
)
def test_split_for(self) -> None:
@@ -1115,7 +1115,7 @@ def test_split_for(self) -> None:
a += i
assert a == 15
""",
- [1,2,4,5], ""
+ [1,2,4,5], "",
)
self.check_coverage("""\
a = 0
@@ -1126,7 +1126,7 @@ def test_split_for(self) -> None:
a += i
assert a == 15
""",
- [1,2,6,7], ""
+ [1,2,6,7], "",
)
def test_try_except(self) -> None:
@@ -1138,7 +1138,7 @@ def test_try_except(self) -> None:
a = 99
assert a == 1
""",
- [1,2,3,4,5,6], "4-5"
+ [1,2,3,4,5,6], "4-5",
)
self.check_coverage("""\
a = 0
@@ -1149,7 +1149,7 @@ def test_try_except(self) -> None:
a = 99
assert a == 99
""",
- [1,2,3,4,5,6,7], ""
+ [1,2,3,4,5,6,7], "",
)
self.check_coverage("""\
a = 0
@@ -1162,7 +1162,7 @@ def test_try_except(self) -> None:
a = 123
assert a == 123
""",
- [1,2,3,4,5,6,7,8,9], "6"
+ [1,2,3,4,5,6,7,8,9], "6",
)
self.check_coverage("""\
a = 0
@@ -1177,7 +1177,7 @@ def test_try_except(self) -> None:
a = 123
assert a == 17
""",
- [1,2,3,4,5,6,7,8,9,10,11], "6, 9-10"
+ [1,2,3,4,5,6,7,8,9,10,11], "6, 9-10",
)
self.check_coverage("""\
a = 0
@@ -1232,7 +1232,7 @@ def test_try_finally(self) -> None:
a = 99
assert a == 99
""",
- [1,2,3,5,6], ""
+ [1,2,3,5,6], "",
)
self.check_coverage("""\
a = 0; b = 0
@@ -1246,7 +1246,7 @@ def test_try_finally(self) -> None:
a = 99
assert a == 99 and b == 123
""",
- [1,2,3,4,5,7,8,9,10], ""
+ [1,2,3,4,5,7,8,9,10], "",
)
def test_function_def(self) -> None:
@@ -1260,7 +1260,7 @@ def foo():
a = foo()
assert a == 1
""",
- [1,2,5,7,8], ""
+ [1,2,5,7,8], "",
)
self.check_coverage("""\
def foo(
@@ -1274,7 +1274,7 @@ def foo(
x = foo(17, 23)
assert x == 40
""",
- [1,7,9,10], ""
+ [1,7,9,10], "",
)
self.check_coverage("""\
def foo(
@@ -1291,7 +1291,7 @@ def foo(
x = foo()
assert x == 22
""",
- [1,10,12,13], ""
+ [1,10,12,13], "",
)
def test_class_def(self) -> None:
@@ -1331,7 +1331,7 @@ def test_default(self) -> None:
f = 6#\tpragma:\tno cover
g = 7
""",
- [1,3,5,7]
+ [1,3,5,7],
)
def test_two_excludes(self) -> None:
@@ -1344,7 +1344,7 @@ def test_two_excludes(self) -> None:
c = 6 # -xx
assert a == 1 and b == 2
""",
- [1,3,5,7], "5", excludes=['-cc', '-xx']
+ [1,3,5,7], "5", excludes=['-cc', '-xx'],
)
def test_excluding_elif_suites(self) -> None:
@@ -1363,7 +1363,7 @@ def test_excluding_elif_suites(self) -> None:
b = 12
assert a == 4 and b == 5 and c == 6
""",
- [1,3,4,5,6,11,12,13], "11-12", excludes=['#pragma: NO COVER']
+ [1,3,4,5,6,11,12,13], "11-12", excludes=['#pragma: NO COVER'],
)
def test_excluding_try_except(self) -> None:
@@ -1440,7 +1440,7 @@ def p1(arg):
assert p1(10) == 20
""",
- lines, ""
+ lines, "",
)
def test_function_decorators_with_args(self) -> None:
@@ -1459,7 +1459,7 @@ def boosted(arg):
assert boosted(10) == 200
""",
- lines, ""
+ lines, "",
)
def test_double_function_decorators(self) -> None:
@@ -1492,7 +1492,7 @@ def boosted2(arg):
assert boosted2(10) == 200
""",
- lines, ""
+ lines, "",
)
@@ -1520,7 +1520,7 @@ def __exit__(self, type, value, tb):
except:
desc = "caught"
""",
- [1,2,3,5,6,8,9,10,11,13,14,15,16,17,18], ""
+ [1,2,3,5,6,8,9,10,11,13,14,15,16,17,18], "",
)
def test_try_except_finally(self) -> None:
diff --git a/tests/test_data.py b/tests/test_data.py
index 1ecd490a7..76dad3c4e 100644
--- a/tests/test_data.py
+++ b/tests/test_data.py
@@ -13,7 +13,7 @@
import threading
from typing import (
- Any, Callable, Collection, Dict, Iterable, Mapping, Set, TypeVar, Union
+ Any, Callable, Collection, Dict, Iterable, Mapping, Set, TypeVar, Union,
)
from unittest import mock
@@ -726,7 +726,7 @@ def test_debug_output_with_debug_option(self) -> None:
r"Opening data file '.*\.coverage'\n" +
r"Initing data file '.*\.coverage'\n" +
r"Opening data file '.*\.coverage'\n$",
- debug.get_output()
+ debug.get_output(),
)
def test_debug_output_without_debug_option(self) -> None:
diff --git a/tests/test_files.py b/tests/test_files.py
index 09da65c20..d08b9a78d 100644
--- a/tests/test_files.py
+++ b/tests/test_files.py
@@ -78,7 +78,7 @@ def test_canonical_filename_ensure_cache_hit(self) -> None:
"curdir, sep", [
("/", "/"),
("X:\\", "\\"),
- ]
+ ],
)
def test_relative_dir_for_root(self, curdir: str, sep: str) -> None:
with mock.patch.object(files.os, 'curdir', new=curdir):
@@ -97,7 +97,7 @@ def test_relative_dir_for_root(self, curdir: str, sep: str) -> None:
("src/files.pex", "src/files.pex/foo.py", True),
("src/files.zip", "src/morefiles.zip/foo.py", False),
("src/files.pex", "src/files.pex/zipfiles/files.zip/foo.py", True),
- ]
+ ],
)
def test_source_exists(self, to_make: str, to_check: str, answer: bool) -> None:
# source_exists won't look inside the zipfile, so it's fine to make
@@ -123,7 +123,7 @@ def test_source_exists(self, to_make: str, to_check: str, answer: bool) -> None:
r"\nostrum\exercitationem\ullam\corporis\suscipit\laboriosam" +
r"\Montréal\☺\my_program.py",
# flat:
- "d_e597dfacb73a23d5_my_program_py"
+ "d_e597dfacb73a23d5_my_program_py",
),
])
def test_flat_rootname(original: str, flat: str) -> None:
@@ -263,7 +263,7 @@ def globs_to_regex_params(
matches=["a+b/foo", "a+b/foobar", "x{y}z/foobar"],
nomatches=["aab/foo", "ab/foo", "xyz/foo"],
),
- ]))
+ ])),
)
def test_globs_to_regex(
patterns: Iterable[str],
diff --git a/tests/test_json.py b/tests/test_json.py
index 27aab867f..b51d0b54a 100644
--- a/tests/test_json.py
+++ b/tests/test_json.py
@@ -47,7 +47,7 @@ def _assert_expected_json_report(
with open(output_path) as result_file:
parsed_result = json.load(result_file)
self.assert_recent_datetime(
- datetime.strptime(parsed_result['meta']['timestamp'], "%Y-%m-%dT%H:%M:%S.%f")
+ datetime.strptime(parsed_result['meta']['timestamp'], "%Y-%m-%dT%H:%M:%S.%f"),
)
del (parsed_result['meta']['timestamp'])
expected_result["meta"].update({
@@ -165,19 +165,19 @@ def run_context_test(self, relative_files: bool) -> None:
'excluded_lines': [],
"contexts": {
"1": [
- "cool_test"
+ "cool_test",
],
"2": [
- "cool_test"
+ "cool_test",
],
"4": [
- "cool_test"
+ "cool_test",
],
"5": [
- "cool_test"
+ "cool_test",
],
"8": [
- "cool_test"
+ "cool_test",
],
},
'summary': {
diff --git a/tests/test_numbits.py b/tests/test_numbits.py
index 988cd91a5..f921dee47 100644
--- a/tests/test_numbits.py
+++ b/tests/test_numbits.py
@@ -113,7 +113,7 @@ def setUp(self) -> None:
[
(i, nums_to_numbits(range(i, 100, i)))
for i in range(1, 11)
- ]
+ ],
)
self.addCleanup(self.cursor.close)
@@ -122,7 +122,7 @@ def test_numbits_union(self) -> None:
"select numbits_union(" +
"(select numbits from data where id = 7)," +
"(select numbits from data where id = 9)" +
- ")"
+ ")",
)
expected = [
7, 9, 14, 18, 21, 27, 28, 35, 36, 42, 45, 49,
@@ -136,7 +136,7 @@ def test_numbits_intersection(self) -> None:
"select numbits_intersection(" +
"(select numbits from data where id = 7)," +
"(select numbits from data where id = 9)" +
- ")"
+ ")",
)
answer = numbits_to_nums(list(res)[0][0])
assert [63] == answer
@@ -144,14 +144,14 @@ def test_numbits_intersection(self) -> None:
def test_numbits_any_intersection(self) -> None:
res = self.cursor.execute(
"select numbits_any_intersection(?, ?)",
- (nums_to_numbits([1, 2, 3]), nums_to_numbits([3, 4, 5]))
+ (nums_to_numbits([1, 2, 3]), nums_to_numbits([3, 4, 5])),
)
answer = [any_inter for (any_inter,) in res]
assert [1] == answer
res = self.cursor.execute(
"select numbits_any_intersection(?, ?)",
- (nums_to_numbits([1, 2, 3]), nums_to_numbits([7, 8, 9]))
+ (nums_to_numbits([1, 2, 3]), nums_to_numbits([7, 8, 9])),
)
answer = [any_inter for (any_inter,) in res]
assert [0] == answer
diff --git a/tests/test_oddball.py b/tests/test_oddball.py
index f12176ec8..d74befe60 100644
--- a/tests/test_oddball.py
+++ b/tests/test_oddball.py
@@ -44,7 +44,7 @@ def neverCalled():
fromMainThread()
other.join()
""",
- [1, 3, 4, 6, 7, 9, 10, 12, 13, 14, 15], "10"
+ [1, 3, 4, 6, 7, 9, 10, 12, 13, 14, 15], "10",
)
def test_thread_run(self) -> None:
@@ -64,7 +64,7 @@ def do_work(self):
thd.start()
thd.join()
""",
- [1, 3, 4, 5, 6, 7, 9, 10, 12, 13, 14], ""
+ [1, 3, 4, 5, 6, 7, 9, 10, 12, 13, 14], "",
)
@@ -83,7 +83,7 @@ def recur(n):
recur(495) # We can get at least this many stack frames.
i = 8 # and this line will be traced
""",
- [1, 2, 3, 5, 7, 8], ""
+ [1, 2, 3, 5, 7, 8], "",
)
def test_long_recursion(self) -> None:
@@ -99,7 +99,7 @@ def recur(n):
recur(100000) # This is definitely too many frames.
""",
- [1, 2, 3, 5, 7], ""
+ [1, 2, 3, 5, 7], "",
)
def test_long_recursion_recovery(self) -> None:
diff --git a/tests/test_parser.py b/tests/test_parser.py
index 09f02f75d..f17c8f2be 100644
--- a/tests/test_parser.py
+++ b/tests/test_parser.py
@@ -47,7 +47,7 @@ class Bar:
pass
""")
assert parser.exit_counts() == {
- 2:1, 3:1, 4:2, 5:1, 7:1, 9:1, 10:1
+ 2:1, 3:1, 4:2, 5:1, 7:1, 9:1, 10:1,
}
def test_generator_exit_counts(self) -> None:
@@ -79,7 +79,7 @@ def test_try_except(self) -> None:
b = 9
""")
assert parser.exit_counts() == {
- 1: 1, 2:1, 3:2, 4:1, 5:2, 6:1, 7:1, 8:1, 9:1
+ 1: 1, 2:1, 3:2, 4:1, 5:2, 6:1, 7:1, 8:1, 9:1,
}
def test_excluded_classes(self) -> None:
@@ -93,7 +93,7 @@ class Bar:
pass
""")
assert parser.exit_counts() == {
- 1:0, 2:1, 3:1
+ 1:0, 2:1, 3:1,
}
def test_missing_branch_to_excluded_code(self) -> None:
diff --git a/tests/test_plugins.py b/tests/test_plugins.py
index bec6551ad..39390ee35 100644
--- a/tests/test_plugins.py
+++ b/tests/test_plugins.py
@@ -207,7 +207,7 @@ def coverage_init(reg, options):
cov._debug_file = debug_out
cov.set_option("run:plugins", ["plugin_sys_info"])
with swallow_warnings(
- r"Plugin file tracers \(plugin_sys_info.Plugin\) aren't supported with .*"
+ r"Plugin file tracers \(plugin_sys_info.Plugin\) aren't supported with .*",
):
cov.start()
cov.stop() # pragma: nested
diff --git a/tests/test_process.py b/tests/test_process.py
index 43518066e..22180b2f2 100644
--- a/tests/test_process.py
+++ b/tests/test_process.py
@@ -730,7 +730,7 @@ def test_coverage_run_dashm_equal_to_doubledashsource(self) -> None:
self.add_test_modules_to_pythonpath()
expected = self.run_command("python -m process_test.try_execfile")
actual = self.run_command(
- "coverage run --source process_test.try_execfile -m process_test.try_execfile"
+ "coverage run --source process_test.try_execfile -m process_test.try_execfile",
)
self.assert_tryexecfile_output(expected, actual)
@@ -750,7 +750,7 @@ def test_coverage_run_dashm_superset_of_doubledashsource(self) -> None:
self.add_test_modules_to_pythonpath()
expected = self.run_command("python -m process_test.try_execfile")
actual = self.run_command(
- "coverage run --source process_test -m process_test.try_execfile"
+ "coverage run --source process_test -m process_test.try_execfile",
)
self.assert_tryexecfile_output(expected, actual)
@@ -843,7 +843,7 @@ def test_coverage_custom_script(self) -> None:
@pytest.mark.skipif(env.WINDOWS, reason="Windows can't make symlinks")
@pytest.mark.skipif(
platform.python_version().endswith("+"),
- reason="setuptools barfs on dev versions: https://github.com/pypa/packaging/issues/678"
+ reason="setuptools barfs on dev versions: https://github.com/pypa/packaging/issues/678",
# https://github.com/nedbat/coveragepy/issues/1556
)
def test_bug_862(self) -> None:
@@ -927,7 +927,7 @@ def excepthook(*args):
assert line_counts(data)['excepthook.py'] == 7
@pytest.mark.skipif(not env.CPYTHON,
- reason="non-CPython handles excepthook exits differently, punt for now."
+ reason="non-CPython handles excepthook exits differently, punt for now.",
)
def test_excepthook_exit(self) -> None:
self.make_file("excepthook_exit.py", """\
@@ -1073,7 +1073,7 @@ def test_report_99p9_is_not_ok(self) -> None:
"a = 1\n" +
"b = 2\n" * 2000 +
"if a > 3:\n" +
- " c = 4\n"
+ " c = 4\n",
)
self.make_data_file(lines={abs_file("ninety_nine_plus.py"): range(1, 2002)})
st, out = self.run_command_status("coverage report --fail-under=100")
diff --git a/tests/test_report_common.py b/tests/test_report_common.py
index 69191e523..3828bb6fd 100644
--- a/tests/test_report_common.py
+++ b/tests/test_report_common.py
@@ -37,14 +37,14 @@ def make_files(self, data: str, settings: bool = False) -> None:
lines={
abs_file("ver1/program.py"): [1, 2, 3, 5],
abs_file("ver2/program.py"): [1, 3, 4, 5],
- }
+ },
)
else:
self.make_data_file(
arcs={
abs_file("ver1/program.py"): arcz_to_arcs(".1 12 23 35 5."),
abs_file("ver2/program.py"): arcz_to_arcs(".1 13 34 45 5."),
- }
+ },
)
if settings:
@@ -198,7 +198,7 @@ def make_files(self) -> None:
lines={
abs_file("good.j2"): [1, 3, 5, 7, 9],
abs_file("bad.j2"): [1, 3, 5, 7, 9],
- }
+ },
)
def test_report(self) -> None:
@@ -229,7 +229,7 @@ def test_html(self) -> None:
| 0 |
67% |
- """
+ """,
)
doesnt_contain("htmlcov/index.html", "bad.j2")
@@ -245,7 +245,7 @@ def test_xml(self) -> None:
'',
)
doesnt_contain("coverage.xml", 'filename="bad.j2"')
- doesnt_contain("coverage.xml", ' None:
self.make_files()
diff --git a/tests/test_results.py b/tests/test_results.py
index f2a5ae83f..5afddc523 100644
--- a/tests/test_results.py
+++ b/tests/test_results.py
@@ -147,19 +147,19 @@ def test_format_lines(
{1,2,3,4,5,10,11,12,13,14},
{1,2,5,10,11,13,14},
(),
- "1-2, 5-11, 13-14"
+ "1-2, 5-11, 13-14",
),
(
[1,2,3,4,5,10,11,12,13,14,98,99],
[1,2,5,10,11,13,14,99],
[(3, [4]), (5, [10, 11]), (98, [100, -1])],
- "1-2, 3->4, 5-11, 13-14, 98->100, 98->exit, 99"
+ "1-2, 3->4, 5-11, 13-14, 98->100, 98->exit, 99",
),
(
[1,2,3,4,98,99,100,101,102,103,104],
[1,2,99,102,103,104],
[(3, [4]), (104, [-1])],
- "1-2, 3->4, 99, 102-104"
+ "1-2, 3->4, 99, 102-104",
),
])
def test_format_lines_with_arcs(
diff --git a/tests/test_setup.py b/tests/test_setup.py
index a7a97d1fe..9061d4dfe 100644
--- a/tests/test_setup.py
+++ b/tests/test_setup.py
@@ -26,7 +26,7 @@ def setUp(self) -> None:
def test_metadata(self) -> None:
status, output = self.run_command_status(
- "python setup.py --description --version --url --author"
+ "python setup.py --description --version --url --author",
)
assert status == 0
out = output.splitlines()
diff --git a/tests/test_templite.py b/tests/test_templite.py
index e34f71692..0ca9b3ccd 100644
--- a/tests/test_templite.py
+++ b/tests/test_templite.py
@@ -117,7 +117,7 @@ def test_loops(self) -> None:
self.try_render(
"Look: {% for n in nums %}{{n}}, {% endfor %}done.",
locals(),
- "Look: 1, 2, 3, 4, done."
+ "Look: 1, 2, 3, 4, done.",
)
# Loop iterables can be filtered.
def rev(l: List[int]) -> List[int]:
@@ -129,21 +129,21 @@ def rev(l: List[int]) -> List[int]:
self.try_render(
"Look: {% for n in nums|rev %}{{n}}, {% endfor %}done.",
locals(),
- "Look: 4, 3, 2, 1, done."
+ "Look: 4, 3, 2, 1, done.",
)
def test_empty_loops(self) -> None:
self.try_render(
"Empty: {% for n in nums %}{{n}}, {% endfor %}done.",
{'nums':[]},
- "Empty: done."
+ "Empty: done.",
)
def test_multiline_loops(self) -> None:
self.try_render(
"Look: \n{% for n in nums %}\n{{n}}, \n{% endfor %}done.",
{'nums':[1,2,3]},
- "Look: \n\n1, \n\n2, \n\n3, \ndone."
+ "Look: \n\n1, \n\n2, \n\n3, \ndone.",
)
def test_multiple_loops(self) -> None:
@@ -151,46 +151,46 @@ def test_multiple_loops(self) -> None:
"{% for n in nums %}{{n}}{% endfor %} and " +
"{% for n in nums %}{{n}}{% endfor %}",
{'nums': [1,2,3]},
- "123 and 123"
+ "123 and 123",
)
def test_comments(self) -> None:
# Single-line comments work:
self.try_render(
"Hello, {# Name goes here: #}{{name}}!",
- {'name':'Ned'}, "Hello, Ned!"
+ {'name':'Ned'}, "Hello, Ned!",
)
# and so do multi-line comments:
self.try_render(
"Hello, {# Name\ngoes\nhere: #}{{name}}!",
- {'name':'Ned'}, "Hello, Ned!"
+ {'name':'Ned'}, "Hello, Ned!",
)
def test_if(self) -> None:
self.try_render(
"Hi, {% if ned %}NED{% endif %}{% if ben %}BEN{% endif %}!",
{'ned': 1, 'ben': 0},
- "Hi, NED!"
+ "Hi, NED!",
)
self.try_render(
"Hi, {% if ned %}NED{% endif %}{% if ben %}BEN{% endif %}!",
{'ned': 0, 'ben': 1},
- "Hi, BEN!"
+ "Hi, BEN!",
)
self.try_render(
"Hi, {% if ned %}NED{% if ben %}BEN{% endif %}{% endif %}!",
{'ned': 0, 'ben': 0},
- "Hi, !"
+ "Hi, !",
)
self.try_render(
"Hi, {% if ned %}NED{% if ben %}BEN{% endif %}{% endif %}!",
{'ned': 1, 'ben': 0},
- "Hi, NED!"
+ "Hi, NED!",
)
self.try_render(
"Hi, {% if ned %}NED{% if ben %}BEN{% endif %}{% endif %}!",
{'ned': 1, 'ben': 1},
- "Hi, NEDBEN!"
+ "Hi, NEDBEN!",
)
def test_complex_if(self) -> None:
@@ -207,24 +207,24 @@ def getit(self): # type: ignore
"{% if obj.getit.y|str %}S{% endif %}" +
"!",
{ 'obj': obj, 'str': str },
- "@XS!"
+ "@XS!",
)
def test_loop_if(self) -> None:
self.try_render(
"@{% for n in nums %}{% if n %}Z{% endif %}{{n}}{% endfor %}!",
{'nums': [0,1,2]},
- "@0Z1Z2!"
+ "@0Z1Z2!",
)
self.try_render(
"X{%if nums%}@{% for n in nums %}{{n}}{% endfor %}{%endif%}!",
{'nums': [0,1,2]},
- "X@012!"
+ "X@012!",
)
self.try_render(
"X{%if nums%}@{% for n in nums %}{{n}}{% endfor %}{%endif%}!",
{'nums': []},
- "X!"
+ "X!",
)
def test_nested_loops(self) -> None:
@@ -235,7 +235,7 @@ def test_nested_loops(self) -> None:
"{% endfor %}" +
"!",
{'nums': [0,1,2], 'abc': ['a', 'b', 'c']},
- "@a0b0c0a1b1c1a2b2c2!"
+ "@a0b0c0a1b1c1a2b2c2!",
)
def test_whitespace_handling(self) -> None:
@@ -244,7 +244,7 @@ def test_whitespace_handling(self) -> None:
" {% for a in abc %}{{a}}{{n}}{% endfor %}\n" +
"{% endfor %}!\n",
{'nums': [0, 1, 2], 'abc': ['a', 'b', 'c']},
- "@\n a0b0c0\n\n a1b1c1\n\n a2b2c2\n!\n"
+ "@\n a0b0c0\n\n a1b1c1\n\n a2b2c2\n!\n",
)
self.try_render(
"@{% for n in nums -%}\n" +
@@ -256,7 +256,7 @@ def test_whitespace_handling(self) -> None:
" {% endfor %}\n" +
"{% endfor %}!\n",
{'nums': [0, 1, 2], 'abc': ['a', 'b', 'c']},
- "@a00b00c00\na11b11c11\na22b22c22\n!\n"
+ "@a00b00c00\na11b11c11\na22b22c22\n!\n",
)
self.try_render(
"@{% for n in nums -%}\n" +
@@ -264,7 +264,7 @@ def test_whitespace_handling(self) -> None:
" x\n" +
"{% endfor %}!\n",
{'nums': [0, 1, 2]},
- "@0x\n1x\n2x\n!\n"
+ "@0x\n1x\n2x\n!\n",
)
self.try_render(" hello ", {}, " hello ")
@@ -283,14 +283,14 @@ def test_eat_whitespace(self) -> None:
"{% endfor %}!\n" +
"{% endjoined %}\n",
{'nums': [0, 1, 2], 'abc': ['a', 'b', 'c']},
- "Hey!\n@XYa0XYb0XYc0XYa1XYb1XYc1XYa2XYb2XYc2!\n"
+ "Hey!\n@XYa0XYb0XYc0XYa1XYb1XYc1XYa2XYb2XYc2!\n",
)
def test_non_ascii(self) -> None:
self.try_render(
"{{where}} ollǝɥ",
{ 'where': 'ǝɹǝɥʇ' },
- "ǝɹǝɥʇ ollǝɥ"
+ "ǝɹǝɥʇ ollǝɥ",
)
def test_exception_during_evaluation(self) -> None:
@@ -298,7 +298,7 @@ def test_exception_during_evaluation(self) -> None:
regex = "^Couldn't evaluate None.bar$"
with pytest.raises(TempliteValueError, match=regex):
self.try_render(
- "Hey {{foo.bar.baz}} there", {'foo': None}, "Hey ??? there"
+ "Hey {{foo.bar.baz}} there", {'foo': None}, "Hey ??? there",
)
def test_bad_names(self) -> None:
diff --git a/tests/test_testing.py b/tests/test_testing.py
index 3c3b0622e..5c60a08a6 100644
--- a/tests/test_testing.py
+++ b/tests/test_testing.py
@@ -67,23 +67,23 @@ def test_file_count(self) -> None:
self.assert_file_count("*.q", 0)
msg = re.escape(
"There should be 13 files matching 'a*.txt', but there are these: " +
- "['abcde.txt', 'afile.txt', 'axczz.txt']"
+ "['abcde.txt', 'afile.txt', 'axczz.txt']",
)
with pytest.raises(AssertionError, match=msg):
self.assert_file_count("a*.txt", 13)
msg = re.escape(
"There should be 12 files matching '*c*.txt', but there are these: " +
- "['abcde.txt', 'axczz.txt']"
+ "['abcde.txt', 'axczz.txt']",
)
with pytest.raises(AssertionError, match=msg):
self.assert_file_count("*c*.txt", 12)
msg = re.escape(
- "There should be 11 files matching 'afile.*', but there are these: ['afile.txt']"
+ "There should be 11 files matching 'afile.*', but there are these: ['afile.txt']",
)
with pytest.raises(AssertionError, match=msg):
self.assert_file_count("afile.*", 11)
msg = re.escape(
- "There should be 10 files matching '*.q', but there are these: []"
+ "There should be 10 files matching '*.q', but there are these: []",
)
with pytest.raises(AssertionError, match=msg):
self.assert_file_count("*.q", 10)
@@ -297,7 +297,7 @@ def test_check_coverage_unpredicted(self) -> None:
self.CODE,
arcz=self.ARCZ,
arcz_missing=self.ARCZ_MISSING,
- arcz_unpredicted=self.ARCZ_UNPREDICTED.replace("7", "3")
+ arcz_unpredicted=self.ARCZ_UNPREDICTED.replace("7", "3"),
)
@@ -390,7 +390,7 @@ def test_arcz_to_arcs(self, arcz: str, arcs: List[TArc]) -> None:
"(35, -10) # Z-A\n" +
"(1, 33) # 1X\n" +
"(100, 7) # ?7\n"
- )
+ ),
),
])
def test_arcs_to_arcz_repr(self, arcs: List[TArc], arcz_repr: str) -> None:
diff --git a/tests/test_venv.py b/tests/test_venv.py
index 5a20f273b..30c1e7610 100644
--- a/tests/test_venv.py
+++ b/tests/test_venv.py
@@ -145,7 +145,7 @@ def testp():
"./third_pkg " +
"-e ./another_pkg " +
"-e ./bug888/app -e ./bug888/plugin " +
- COVERAGE_INSTALL_ARGS
+ COVERAGE_INSTALL_ARGS,
)
shutil.rmtree("third_pkg")
@@ -198,7 +198,7 @@ def get_trace_output(self) -> str:
@pytest.mark.parametrize('install_source_in_venv', [True, False])
def test_third_party_venv_isnt_measured(
- self, coverage_command: str, install_source_in_venv: bool
+ self, coverage_command: str, install_source_in_venv: bool,
) -> None:
if install_source_in_venv:
make_file("setup.py", """\
@@ -345,7 +345,7 @@ def test_installed_namespace_packages(self, coverage_command: str) -> None:
def test_bug_888(self, coverage_command: str) -> None:
out = run_in_venv(
coverage_command +
- " run --source=bug888/app,bug888/plugin bug888/app/testcov/main.py"
+ " run --source=bug888/app,bug888/plugin bug888/app/testcov/main.py",
)
# When the test fails, the output includes "Already imported a file that will be measured"
assert out == "Plugin here\n"
diff --git a/tests/test_xml.py b/tests/test_xml.py
index 1d06ba50e..0b1ddd58b 100644
--- a/tests/test_xml.py
+++ b/tests/test_xml.py
@@ -322,7 +322,7 @@ def test_accented_directory(self) -> None:
def test_no_duplicate_packages(self) -> None:
self.make_file(
"namespace/package/__init__.py",
- "from . import sample; from . import test; from .subpackage import test"
+ "from . import sample; from . import test; from .subpackage import test",
)
self.make_file("namespace/package/sample.py", "print('package.sample')")
self.make_file("namespace/package/test.py", "print('package.test')")
From 401a63bf08bdfd780b662f64d2dfe3603f2584dd Mon Sep 17 00:00:00 2001
From: Ned Batchelder
Date: Tue, 27 Feb 2024 16:17:27 -0500
Subject: [PATCH 04/24] style: modernize type hints, a few more f-strings
I used:
```
ruff --select=UP,F --unsafe-fixes --fix *.py {coverage,tests,ci}/*.py
```
and then fixed a few "unused imports" that are actually needed by hand.
---
coverage/__init__.py | 1 -
coverage/annotate.py | 6 +-
coverage/cmdline.py | 34 +++----
coverage/collector.py | 36 ++++----
coverage/config.py | 82 ++++++++---------
coverage/context.py | 12 +--
coverage/control.py | 182 ++++++++++++++++++-------------------
coverage/data.py | 14 +--
coverage/debug.py | 36 ++++----
coverage/disposition.py | 8 +-
coverage/env.py | 4 +-
coverage/execfile.py | 22 ++---
coverage/files.py | 24 ++---
coverage/html.py | 34 +++----
coverage/inorout.py | 40 ++++----
coverage/jsonreport.py | 12 +--
coverage/lcovreport.py | 4 +-
coverage/misc.py | 21 ++---
coverage/multiproc.py | 10 +-
coverage/numbits.py | 4 +-
coverage/parser.py | 178 ++++++++++++++++++------------------
coverage/phystokens.py | 12 +--
coverage/plugin.py | 32 +++----
coverage/plugin_support.py | 46 +++++-----
coverage/python.py | 28 +++---
coverage/pytracer.py | 32 +++----
coverage/report.py | 26 +++---
coverage/report_core.py | 10 +-
coverage/results.py | 26 +++---
coverage/sqldata.py | 52 +++++------
coverage/sqlitedb.py | 8 +-
coverage/sysmon.py | 31 +++----
coverage/templite.py | 25 +++--
coverage/tomlconfig.py | 24 ++---
coverage/types.py | 22 ++---
coverage/xmlreport.py | 8 +-
igor.py | 4 +-
tests/conftest.py | 4 +-
tests/coveragetest.py | 55 ++++++-----
tests/goldtest.py | 10 +-
tests/helpers.py | 27 +++---
tests/mixins.py | 6 +-
tests/plugin1.py | 10 +-
tests/plugin2.py | 10 +-
tests/test_api.py | 12 +--
tests/test_arcs.py | 8 +-
tests/test_cmdline.py | 18 ++--
tests/test_concurrency.py | 8 +-
tests/test_context.py | 28 +++---
tests/test_data.py | 4 +-
tests/test_files.py | 4 +-
tests/test_html.py | 18 ++--
tests/test_json.py | 10 +-
tests/test_numbits.py | 8 +-
tests/test_plugins.py | 14 +--
tests/test_process.py | 4 +-
tests/test_report.py | 3 +-
tests/test_report_core.py | 16 ++--
tests/test_results.py | 6 +-
tests/test_templite.py | 8 +-
tests/test_testing.py | 7 +-
tests/test_xml.py | 6 +-
62 files changed, 719 insertions(+), 735 deletions(-)
diff --git a/coverage/__init__.py b/coverage/__init__.py
index d22d09deb..c3403d444 100644
--- a/coverage/__init__.py
+++ b/coverage/__init__.py
@@ -39,4 +39,3 @@
# On Windows, we encode and decode deep enough that something goes wrong and
# the encodings.utf_8 module is loaded and then unloaded, I don't know why.
# Adding a reference here prevents it from being unloaded. Yuk.
-import encodings.utf_8 # pylint: disable=wrong-import-position, wrong-import-order
diff --git a/coverage/annotate.py b/coverage/annotate.py
index 2ef89c967..46a82a81d 100644
--- a/coverage/annotate.py
+++ b/coverage/annotate.py
@@ -8,7 +8,7 @@
import os
import re
-from typing import Iterable, Optional, TYPE_CHECKING
+from typing import Iterable, TYPE_CHECKING
from coverage.files import flat_rootname
from coverage.misc import ensure_dir, isolate_module
@@ -48,12 +48,12 @@ class AnnotateReporter:
def __init__(self, coverage: Coverage) -> None:
self.coverage = coverage
self.config = self.coverage.config
- self.directory: Optional[str] = None
+ self.directory: str | None = None
blank_re = re.compile(r"\s*(#|$)")
else_re = re.compile(r"\s*else\s*:\s*(#|$)")
- def report(self, morfs: Optional[Iterable[TMorf]], directory: Optional[str] = None) -> None:
+ def report(self, morfs: Iterable[TMorf] | None, directory: str | None = None) -> None:
"""Run the report.
See `coverage.report()` for arguments.
diff --git a/coverage/cmdline.py b/coverage/cmdline.py
index 25012019a..463ea8fde 100644
--- a/coverage/cmdline.py
+++ b/coverage/cmdline.py
@@ -14,7 +14,7 @@
import textwrap
import traceback
-from typing import cast, Any, List, NoReturn, Optional, Tuple
+from typing import cast, Any, NoReturn
import coverage
from coverage import Coverage
@@ -281,7 +281,7 @@ class OptionParserError(Exception):
"""Used to stop the optparse error handler ending the process."""
pass
- def parse_args_ok(self, args: List[str]) -> Tuple[bool, Optional[optparse.Values], List[str]]:
+ def parse_args_ok(self, args: list[str]) -> tuple[bool, optparse.Values | None, list[str]]:
"""Call optparse.parse_args, but return a triple:
(ok, options, args)
@@ -317,9 +317,9 @@ class CmdOptionParser(CoverageOptionParser):
def __init__(
self,
action: str,
- options: List[optparse.Option],
+ options: list[optparse.Option],
description: str,
- usage: Optional[str] = None,
+ usage: str | None = None,
):
"""Create an OptionParser for a coverage.py command.
@@ -549,9 +549,9 @@ def get_prog_name(self) -> str:
def show_help(
- error: Optional[str] = None,
- topic: Optional[str] = None,
- parser: Optional[optparse.OptionParser] = None,
+ error: str | None = None,
+ topic: str | None = None,
+ parser: optparse.OptionParser | None = None,
) -> None:
"""Display an error message, or the named topic."""
assert error or topic or parser
@@ -605,7 +605,7 @@ def __init__(self) -> None:
self.global_option = False
self.coverage: Coverage
- def command_line(self, argv: List[str]) -> int:
+ def command_line(self, argv: list[str]) -> int:
"""The bulk of the command line interface to coverage.py.
`argv` is the argument list to process.
@@ -620,7 +620,7 @@ def command_line(self, argv: List[str]) -> int:
# The command syntax we parse depends on the first argument. Global
# switch syntax always starts with an option.
- parser: Optional[optparse.OptionParser]
+ parser: optparse.OptionParser | None
self.global_option = argv[0].startswith("-")
if self.global_option:
parser = GlobalOptionParser()
@@ -772,7 +772,7 @@ def command_line(self, argv: List[str]) -> int:
def do_help(
self,
options: optparse.Values,
- args: List[str],
+ args: list[str],
parser: optparse.OptionParser,
) -> bool:
"""Deal with help requests.
@@ -807,7 +807,7 @@ def do_help(
return False
- def do_run(self, options: optparse.Values, args: List[str]) -> int:
+ def do_run(self, options: optparse.Values, args: list[str]) -> int:
"""Implementation of 'coverage run'."""
if not args:
@@ -866,7 +866,7 @@ def do_run(self, options: optparse.Values, args: List[str]) -> int:
return OK
- def do_debug(self, args: List[str]) -> int:
+ def do_debug(self, args: list[str]) -> int:
"""Implementation of 'coverage debug'."""
if not args:
@@ -899,7 +899,7 @@ def do_debug(self, args: List[str]) -> int:
return OK
-def unshell_list(s: str) -> Optional[List[str]]:
+def unshell_list(s: str) -> list[str] | None:
"""Turn a command-line argument into a list."""
if not s:
return None
@@ -913,7 +913,7 @@ def unshell_list(s: str) -> Optional[List[str]]:
return s.split(",")
-def unglob_args(args: List[str]) -> List[str]:
+def unglob_args(args: list[str]) -> list[str]:
"""Interpret shell wildcards for platforms that need it."""
if env.WINDOWS:
globbed = []
@@ -958,7 +958,7 @@ def unglob_args(args: List[str]) -> List[str]:
}
-def main(argv: Optional[List[str]] = None) -> Optional[int]:
+def main(argv: list[str] | None = None) -> int | None:
"""The main entry point to coverage.py.
This is installed as the script entry point.
@@ -997,8 +997,8 @@ def main(argv: Optional[List[str]] = None) -> Optional[int]:
original_main = main
def main( # pylint: disable=function-redefined
- argv: Optional[List[str]] = None,
- ) -> Optional[int]:
+ argv: list[str] | None = None,
+ ) -> int | None:
"""A wrapper around main that profiles."""
profiler = SimpleLauncher.launch()
try:
diff --git a/coverage/collector.py b/coverage/collector.py
index 9a7d5c02d..9bd380c2e 100644
--- a/coverage/collector.py
+++ b/coverage/collector.py
@@ -11,7 +11,7 @@
from types import FrameType
from typing import (
- cast, Any, Callable, Dict, List, Mapping, Optional, Set, Type, TypeVar,
+ cast, Any, Callable, Dict, List, Mapping, Set, TypeVar,
)
from coverage import env
@@ -70,7 +70,7 @@ class Collector:
# The stack of active Collectors. Collectors are added here when started,
# and popped when stopped. Collectors on the stack are paused when not
# the top, and resumed when they become the top again.
- _collectors: List[Collector] = []
+ _collectors: list[Collector] = []
# The concurrency settings we support here.
LIGHT_THREADS = {"greenlet", "eventlet", "gevent"}
@@ -79,12 +79,12 @@ def __init__(
self,
should_trace: Callable[[str, FrameType], TFileDisposition],
check_include: Callable[[str, FrameType], bool],
- should_start_context: Optional[Callable[[FrameType], Optional[str]]],
+ should_start_context: Callable[[FrameType], str | None] | None,
file_mapper: Callable[[str], str],
timid: bool,
branch: bool,
warn: TWarnFn,
- concurrency: List[str],
+ concurrency: list[str],
metacov: bool,
) -> None:
"""Create a collector.
@@ -136,16 +136,16 @@ def __init__(
self.covdata: CoverageData
self.threading = None
- self.static_context: Optional[str] = None
+ self.static_context: str | None = None
self.origin = short_stack()
self.concur_id_func = None
- self._trace_class: Type[TracerCore]
- self.file_disposition_class: Type[TFileDisposition]
+ self._trace_class: type[TracerCore]
+ self.file_disposition_class: type[TFileDisposition]
- core: Optional[str]
+ core: str | None
if timid:
core = "pytrace"
else:
@@ -240,7 +240,7 @@ def __init__(
def __repr__(self) -> str:
return f""
- def use_data(self, covdata: CoverageData, context: Optional[str]) -> None:
+ def use_data(self, covdata: CoverageData, context: str | None) -> None:
"""Use `covdata` for recording data."""
self.covdata = covdata
self.static_context = context
@@ -268,9 +268,9 @@ def reset(self) -> None:
# A dictionary mapping file names to file tracer plugin names that will
# handle them.
- self.file_tracers: Dict[str, str] = {}
+ self.file_tracers: dict[str, str] = {}
- self.disabled_plugins: Set[str] = set()
+ self.disabled_plugins: set[str] = set()
# The .should_trace_cache attribute is a cache from file names to
# coverage.FileDisposition objects, or None. When a file is first
@@ -301,7 +301,7 @@ def reset(self) -> None:
self.should_trace_cache = {}
# Our active Tracers.
- self.tracers: List[TracerCore] = []
+ self.tracers: list[TracerCore] = []
self._clear_data()
@@ -342,12 +342,12 @@ def _start_tracer(self) -> TTraceFn | None:
#
# New in 3.12: threading.settrace_all_threads: https://github.com/python/cpython/pull/96681
- def _installation_trace(self, frame: FrameType, event: str, arg: Any) -> Optional[TTraceFn]:
+ def _installation_trace(self, frame: FrameType, event: str, arg: Any) -> TTraceFn | None:
"""Called on new threads, installs the real tracer."""
# Remove ourselves as the trace function.
sys.settrace(None)
# Install the real tracer.
- fn: Optional[TTraceFn] = self._start_tracer()
+ fn: TTraceFn | None = self._start_tracer()
# Invoke the real trace function with the current event, to be sure
# not to lose an event.
if fn:
@@ -444,9 +444,9 @@ def _activity(self) -> bool:
"""
return any(tracer.activity() for tracer in self.tracers)
- def switch_context(self, new_context: Optional[str]) -> None:
+ def switch_context(self, new_context: str | None) -> None:
"""Switch to a new dynamic context."""
- context: Optional[str]
+ context: str | None
self.flush_data()
if self.static_context:
context = self.static_context
@@ -471,7 +471,7 @@ def cached_mapped_file(self, filename: str) -> str:
"""A locally cached version of file names mapped through file_mapper."""
return self.file_mapper(filename)
- def mapped_file_dict(self, d: Mapping[str, T]) -> Dict[str, T]:
+ def mapped_file_dict(self, d: Mapping[str, T]) -> dict[str, T]:
"""Return a dict like d, but with keys modified by file_mapper."""
# The call to list(items()) ensures that the GIL protects the dictionary
# iterator against concurrent modifications by tracers running
@@ -511,7 +511,7 @@ def flush_data(self) -> bool:
# Unpack the line number pairs packed into integers. See
# tracer.c:CTracer_record_pair for the C code that creates
# these packed ints.
- arc_data: Dict[str, List[TArc]] = {}
+ arc_data: dict[str, list[TArc]] = {}
packed_data = cast(Dict[str, Set[int]], self.data)
# The list() here and in the inner loop are to get a clean copy
diff --git a/coverage/config.py b/coverage/config.py
index 9eaf9dbd4..4e1c71f27 100644
--- a/coverage/config.py
+++ b/coverage/config.py
@@ -13,7 +13,7 @@
import re
from typing import (
- Any, Callable, Dict, Iterable, List, Optional, Tuple, Union,
+ Any, Callable, Iterable, Union,
)
from coverage.exceptions import ConfigError
@@ -46,12 +46,12 @@ def __init__(self, our_file: bool) -> None:
def read( # type: ignore[override]
self,
filenames: Iterable[str],
- encoding_unused: Optional[str] = None,
- ) -> List[str]:
+ encoding_unused: str | None = None,
+ ) -> list[str]:
"""Read a file name as UTF-8 configuration data."""
return super().read(filenames, encoding="utf-8")
- def real_section(self, section: str) -> Optional[str]:
+ def real_section(self, section: str) -> str | None:
"""Get the actual name of a section."""
for section_prefix in self.section_prefixes:
real_section = section_prefix + section
@@ -69,7 +69,7 @@ def has_option(self, section: str, option: str) -> bool:
def has_section(self, section: str) -> bool:
return bool(self.real_section(section))
- def options(self, section: str) -> List[str]:
+ def options(self, section: str) -> list[str]:
real_section = self.real_section(section)
if real_section is not None:
return super().options(real_section)
@@ -77,7 +77,7 @@ def options(self, section: str) -> List[str]:
def get_section(self, section: str) -> TConfigSectionOut:
"""Get the contents of a section, as a dictionary."""
- d: Dict[str, TConfigValueOut] = {}
+ d: dict[str, TConfigValueOut] = {}
for opt in self.options(section):
d[opt] = self.get(section, opt)
return d
@@ -103,7 +103,7 @@ def get(self, section: str, option: str, *args: Any, **kwargs: Any) -> str: # ty
v = substitute_variables(v, os.environ)
return v
- def getlist(self, section: str, option: str) -> List[str]:
+ def getlist(self, section: str, option: str) -> list[str]:
"""Read a list of strings.
The value of `section` and `option` is treated as a comma- and newline-
@@ -121,7 +121,7 @@ def getlist(self, section: str, option: str) -> List[str]:
values.append(value)
return values
- def getregexlist(self, section: str, option: str) -> List[str]:
+ def getregexlist(self, section: str, option: str) -> list[str]:
"""Read a list of full-line regexes.
The value of `section` and `option` is treated as a newline-separated
@@ -180,12 +180,12 @@ def __init__(self) -> None:
"""Initialize the configuration attributes to their defaults."""
# Metadata about the config.
# We tried to read these config files.
- self.attempted_config_files: List[str] = []
+ self.attempted_config_files: list[str] = []
# We did read these config files, but maybe didn't find any content for us.
- self.config_files_read: List[str] = []
+ self.config_files_read: list[str] = []
# The file that gave us our configuration.
- self.config_file: Optional[str] = None
- self._config_contents: Optional[bytes] = None
+ self.config_file: str | None = None
+ self._config_contents: bytes | None = None
# Defaults for [run] and [report]
self._include = None
@@ -193,49 +193,49 @@ def __init__(self) -> None:
# Defaults for [run]
self.branch = False
- self.command_line: Optional[str] = None
- self.concurrency: List[str] = []
- self.context: Optional[str] = None
+ self.command_line: str | None = None
+ self.concurrency: list[str] = []
+ self.context: str | None = None
self.cover_pylib = False
self.data_file = ".coverage"
- self.debug: List[str] = []
- self.debug_file: Optional[str] = None
- self.disable_warnings: List[str] = []
- self.dynamic_context: Optional[str] = None
+ self.debug: list[str] = []
+ self.debug_file: str | None = None
+ self.disable_warnings: list[str] = []
+ self.dynamic_context: str | None = None
self.parallel = False
- self.plugins: List[str] = []
+ self.plugins: list[str] = []
self.relative_files = False
- self.run_include: List[str] = []
- self.run_omit: List[str] = []
+ self.run_include: list[str] = []
+ self.run_omit: list[str] = []
self.sigterm = False
- self.source: Optional[List[str]] = None
- self.source_pkgs: List[str] = []
+ self.source: list[str] | None = None
+ self.source_pkgs: list[str] = []
self.timid = False
- self._crash: Optional[str] = None
+ self._crash: str | None = None
# Defaults for [report]
self.exclude_list = DEFAULT_EXCLUDE[:]
- self.exclude_also: List[str] = []
+ self.exclude_also: list[str] = []
self.fail_under = 0.0
- self.format: Optional[str] = None
+ self.format: str | None = None
self.ignore_errors = False
self.include_namespace_packages = False
- self.report_include: Optional[List[str]] = None
- self.report_omit: Optional[List[str]] = None
+ self.report_include: list[str] | None = None
+ self.report_omit: list[str] | None = None
self.partial_always_list = DEFAULT_PARTIAL_ALWAYS[:]
self.partial_list = DEFAULT_PARTIAL[:]
self.precision = 0
- self.report_contexts: Optional[List[str]] = None
+ self.report_contexts: list[str] | None = None
self.show_missing = False
self.skip_covered = False
self.skip_empty = False
- self.sort: Optional[str] = None
+ self.sort: str | None = None
# Defaults for [html]
- self.extra_css: Optional[str] = None
+ self.extra_css: str | None = None
self.html_dir = "htmlcov"
- self.html_skip_covered: Optional[bool] = None
- self.html_skip_empty: Optional[bool] = None
+ self.html_skip_covered: bool | None = None
+ self.html_skip_empty: bool | None = None
self.html_title = "Coverage report"
self.show_contexts = False
@@ -252,10 +252,10 @@ def __init__(self) -> None:
self.lcov_output = "coverage.lcov"
# Defaults for [paths]
- self.paths: Dict[str, List[str]] = {}
+ self.paths: dict[str, list[str]] = {}
# Options for plugins
- self.plugin_options: Dict[str, TConfigSectionOut] = {}
+ self.plugin_options: dict[str, TConfigSectionOut] = {}
MUST_BE_LIST = {
"debug", "concurrency", "plugins",
@@ -453,7 +453,7 @@ def get_plugin_options(self, plugin: str) -> TConfigSectionOut:
"""Get a dictionary of options for the plugin named `plugin`."""
return self.plugin_options.get(plugin, {})
- def set_option(self, option_name: str, value: Union[TConfigValueIn, TConfigSectionIn]) -> None:
+ def set_option(self, option_name: str, value: TConfigValueIn | TConfigSectionIn) -> None:
"""Set an option in the configuration.
`option_name` is a colon-separated string indicating the section and
@@ -484,7 +484,7 @@ def set_option(self, option_name: str, value: Union[TConfigValueIn, TConfigSecti
# If we get here, we didn't find the option.
raise ConfigError(f"No such option: {option_name!r}")
- def get_option(self, option_name: str) -> Optional[TConfigValueOut]:
+ def get_option(self, option_name: str) -> TConfigValueOut | None:
"""Get an option from the configuration.
`option_name` is a colon-separated string indicating the section and
@@ -527,14 +527,14 @@ def post_process(self) -> None:
}
self.exclude_list += self.exclude_also
- def debug_info(self) -> List[Tuple[str, Any]]:
+ def debug_info(self) -> list[tuple[str, Any]]:
"""Make a list of (name, value) pairs for writing debug info."""
return human_sorted_items(
(k, v) for k, v in self.__dict__.items() if not k.startswith("_")
)
-def config_files_to_try(config_file: Union[bool, str]) -> List[Tuple[str, bool, bool]]:
+def config_files_to_try(config_file: bool | str) -> list[tuple[str, bool, bool]]:
"""What config files should we try to read?
Returns a list of tuples:
@@ -566,7 +566,7 @@ def config_files_to_try(config_file: Union[bool, str]) -> List[Tuple[str, bool,
def read_coverage_config(
- config_file: Union[bool, str],
+ config_file: bool | str,
warn: Callable[[str], None],
**kwargs: TConfigValueIn,
) -> CoverageConfig:
diff --git a/coverage/context.py b/coverage/context.py
index 20a5c92d0..c8ee71271 100644
--- a/coverage/context.py
+++ b/coverage/context.py
@@ -6,12 +6,12 @@
from __future__ import annotations
from types import FrameType
-from typing import cast, Callable, Optional, Sequence
+from typing import cast, Callable, Sequence
def combine_context_switchers(
- context_switchers: Sequence[Callable[[FrameType], Optional[str]]],
-) -> Optional[Callable[[FrameType], Optional[str]]]:
+ context_switchers: Sequence[Callable[[FrameType], str | None]],
+) -> Callable[[FrameType], str | None] | None:
"""Create a single context switcher from multiple switchers.
`context_switchers` is a list of functions that take a frame as an
@@ -30,7 +30,7 @@ def combine_context_switchers(
if len(context_switchers) == 1:
return context_switchers[0]
- def should_start_context(frame: FrameType) -> Optional[str]:
+ def should_start_context(frame: FrameType) -> str | None:
"""The combiner for multiple context switchers."""
for switcher in context_switchers:
new_context = switcher(frame)
@@ -41,7 +41,7 @@ def should_start_context(frame: FrameType) -> Optional[str]:
return should_start_context
-def should_start_context_test_function(frame: FrameType) -> Optional[str]:
+def should_start_context_test_function(frame: FrameType) -> str | None:
"""Is this frame calling a test_* function?"""
co_name = frame.f_code.co_name
if co_name.startswith("test") or co_name == "runTest":
@@ -49,7 +49,7 @@ def should_start_context_test_function(frame: FrameType) -> Optional[str]:
return None
-def qualname_from_frame(frame: FrameType) -> Optional[str]:
+def qualname_from_frame(frame: FrameType) -> str | None:
"""Get a qualified name for the code running in `frame`."""
co = frame.f_code
fname = co.co_name
diff --git a/coverage/control.py b/coverage/control.py
index e5da10227..7b790ea43 100644
--- a/coverage/control.py
+++ b/coverage/control.py
@@ -20,7 +20,7 @@
from types import FrameType
from typing import (
cast,
- Any, Callable, Dict, IO, Iterable, Iterator, List, Optional, Tuple, Union,
+ Any, Callable, IO, Iterable, Iterator, List,
)
from coverage import env
@@ -104,10 +104,10 @@ class Coverage(TConfigurable):
"""
# The stack of started Coverage instances.
- _instances: List[Coverage] = []
+ _instances: list[Coverage] = []
@classmethod
- def current(cls) -> Optional[Coverage]:
+ def current(cls) -> Coverage | None:
"""Get the latest started `Coverage` instance, if any.
Returns: a `Coverage` instance, or None.
@@ -122,21 +122,21 @@ def current(cls) -> Optional[Coverage]:
def __init__( # pylint: disable=too-many-arguments
self,
- data_file: Optional[Union[FilePath, DefaultValue]] = DEFAULT_DATAFILE,
- data_suffix: Optional[Union[str, bool]] = None,
- cover_pylib: Optional[bool] = None,
+ data_file: FilePath | DefaultValue | None = DEFAULT_DATAFILE,
+ data_suffix: str | bool | None = None,
+ cover_pylib: bool | None = None,
auto_data: bool = False,
- timid: Optional[bool] = None,
- branch: Optional[bool] = None,
- config_file: Union[FilePath, bool] = True,
- source: Optional[Iterable[str]] = None,
- source_pkgs: Optional[Iterable[str]] = None,
- omit: Optional[Union[str, Iterable[str]]] = None,
- include: Optional[Union[str, Iterable[str]]] = None,
- debug: Optional[Iterable[str]] = None,
- concurrency: Optional[Union[str, Iterable[str]]] = None,
+ timid: bool | None = None,
+ branch: bool | None = None,
+ config_file: FilePath | bool = True,
+ source: Iterable[str] | None = None,
+ source_pkgs: Iterable[str] | None = None,
+ omit: str | Iterable[str] | None = None,
+ include: str | Iterable[str] | None = None,
+ debug: Iterable[str] | None = None,
+ concurrency: str | Iterable[str] | None = None,
check_preimported: bool = False,
- context: Optional[str] = None,
+ context: str | None = None,
messages: bool = False,
) -> None:
"""
@@ -240,7 +240,7 @@ def __init__( # pylint: disable=too-many-arguments
data_file = os.fspath(data_file)
# This is injectable by tests.
- self._debug_file: Optional[IO[str]] = None
+ self._debug_file: IO[str] | None = None
self._auto_load = self._auto_save = auto_data
self._data_suffix_specified = data_suffix
@@ -249,25 +249,25 @@ def __init__( # pylint: disable=too-many-arguments
self._warn_no_data = True
self._warn_unimported_source = True
self._warn_preimported_source = check_preimported
- self._no_warn_slugs: List[str] = []
+ self._no_warn_slugs: list[str] = []
self._messages = messages
# A record of all the warnings that have been issued.
- self._warnings: List[str] = []
+ self._warnings: list[str] = []
# Other instance attributes, set with placebos or placeholders.
# More useful objects will be created later.
self._debug: DebugControl = NoDebugging()
- self._inorout: Optional[InOrOut] = None
+ self._inorout: InOrOut | None = None
self._plugins: Plugins = Plugins()
- self._data: Optional[CoverageData] = None
- self._collector: Optional[Collector] = None
+ self._data: CoverageData | None = None
+ self._collector: Collector | None = None
self._metacov = False
self._file_mapper: Callable[[str], str] = abs_file
self._data_suffix = self._run_suffix = None
- self._exclude_re: Dict[str, str] = {}
- self._old_sigterm: Optional[Callable[[int, Optional[FrameType]], Any]] = None
+ self._exclude_re: dict[str, str] = {}
+ self._old_sigterm: Callable[[int, FrameType | None], Any] | None = None
# State machine variables:
# Have we initialized everything?
@@ -413,7 +413,7 @@ def _check_include_omit_etc(self, filename: str, frame: FrameType) -> bool:
return not reason
- def _warn(self, msg: str, slug: Optional[str] = None, once: bool = False) -> None:
+ def _warn(self, msg: str, slug: str | None = None, once: bool = False) -> None:
"""Use `msg` as a warning.
For warning suppression, use `slug` as the shorthand.
@@ -445,7 +445,7 @@ def _message(self, msg: str) -> None:
if self._messages:
print(msg)
- def get_option(self, option_name: str) -> Optional[TConfigValueOut]:
+ def get_option(self, option_name: str) -> TConfigValueOut | None:
"""Get an option from the configuration.
`option_name` is a colon-separated string indicating the section and
@@ -463,7 +463,7 @@ def get_option(self, option_name: str) -> Optional[TConfigValueOut]:
"""
return self.config.get_option(option_name)
- def set_option(self, option_name: str, value: Union[TConfigValueIn, TConfigSectionIn]) -> None:
+ def set_option(self, option_name: str, value: TConfigValueIn | TConfigSectionIn) -> None:
"""Set an option in the configuration.
`option_name` is a colon-separated string indicating the section and
@@ -511,7 +511,7 @@ def load(self) -> None:
def _init_for_start(self) -> None:
"""Initialization for start()"""
# Construct the collector.
- concurrency: List[str] = self.config.concurrency or []
+ concurrency: list[str] = self.config.concurrency or []
if "multiprocessing" in concurrency:
if self.config.config_file is None:
raise ConfigError("multiprocessing requires a configuration file")
@@ -600,7 +600,7 @@ def _init_for_start(self) -> None:
signal.SIGTERM, self._on_sigterm,
)
- def _init_data(self, suffix: Optional[Union[str, bool]]) -> None:
+ def _init_data(self, suffix: str | bool | None) -> None:
"""Create a data file if we don't have one yet."""
if self._data is None:
# Create the data file. We do this at construction time so that the
@@ -685,7 +685,7 @@ def _atexit(self, event: str = "atexit") -> None:
if self._auto_save or event == "sigterm":
self.save()
- def _on_sigterm(self, signum_unused: int, frame_unused: Optional[FrameType]) -> None:
+ def _on_sigterm(self, signum_unused: int, frame_unused: FrameType | None) -> None:
"""A handler for signal.SIGTERM."""
self._atexit("sigterm")
# Statements after here won't be seen by metacov because we just wrote
@@ -769,7 +769,7 @@ def _exclude_regex(self, which: str) -> str:
self._exclude_re[which] = join_regex(excl_list)
return self._exclude_re[which]
- def get_exclude_list(self, which: str = "exclude") -> List[str]:
+ def get_exclude_list(self, which: str = "exclude") -> list[str]:
"""Return a list of excluded regex strings.
`which` indicates which list is desired. See :meth:`exclude` for the
@@ -798,7 +798,7 @@ def _make_aliases(self) -> PathAliases:
def combine(
self,
- data_paths: Optional[Iterable[str]] = None,
+ data_paths: Iterable[str] | None = None,
strict: bool = False,
keep: bool = False,
) -> None:
@@ -896,7 +896,7 @@ def _post_save_work(self) -> None:
self._data.touch_files(paths, plugin_name)
# Backward compatibility with version 1.
- def analysis(self, morf: TMorf) -> Tuple[str, List[TLineNo], List[TLineNo], str]:
+ def analysis(self, morf: TMorf) -> tuple[str, list[TLineNo], list[TLineNo], str]:
"""Like `analysis2` but doesn't return excluded line numbers."""
f, s, _, m, mf = self.analysis2(morf)
return f, s, m, mf
@@ -904,7 +904,7 @@ def analysis(self, morf: TMorf) -> Tuple[str, List[TLineNo], List[TLineNo], str]
def analysis2(
self,
morf: TMorf,
- ) -> Tuple[str, List[TLineNo], List[TLineNo], List[TLineNo], str]:
+ ) -> tuple[str, list[TLineNo], list[TLineNo], list[TLineNo], str]:
"""Analyze a module.
`morf` is a module or a file name. It will be analyzed to determine
@@ -930,7 +930,7 @@ def analysis2(
analysis.missing_formatted(),
)
- def _analyze(self, it: Union[FileReporter, TMorf]) -> Analysis:
+ def _analyze(self, it: FileReporter | TMorf) -> Analysis:
"""Analyze a single morf or code unit.
Returns an `Analysis` object.
@@ -952,7 +952,7 @@ def _get_file_reporter(self, morf: TMorf) -> FileReporter:
"""Get a FileReporter for a module or file name."""
assert self._data is not None
plugin = None
- file_reporter: Union[str, FileReporter] = "python"
+ file_reporter: str | FileReporter = "python"
if isinstance(morf, str):
mapped_morf = self._file_mapper(morf)
@@ -975,7 +975,7 @@ def _get_file_reporter(self, morf: TMorf) -> FileReporter:
assert isinstance(file_reporter, FileReporter)
return file_reporter
- def _get_file_reporters(self, morfs: Optional[Iterable[TMorf]] = None) -> List[FileReporter]:
+ def _get_file_reporters(self, morfs: Iterable[TMorf] | None = None) -> list[FileReporter]:
"""Get a list of FileReporters for a list of modules or file names.
For each module or file name in `morfs`, find a FileReporter. Return
@@ -1007,18 +1007,18 @@ def _prepare_data_for_reporting(self) -> None:
def report(
self,
- morfs: Optional[Iterable[TMorf]] = None,
- show_missing: Optional[bool] = None,
- ignore_errors: Optional[bool] = None,
- file: Optional[IO[str]] = None,
- omit: Optional[Union[str, List[str]]] = None,
- include: Optional[Union[str, List[str]]] = None,
- skip_covered: Optional[bool] = None,
- contexts: Optional[List[str]] = None,
- skip_empty: Optional[bool] = None,
- precision: Optional[int] = None,
- sort: Optional[str] = None,
- output_format: Optional[str] = None,
+ morfs: Iterable[TMorf] | None = None,
+ show_missing: bool | None = None,
+ ignore_errors: bool | None = None,
+ file: IO[str] | None = None,
+ omit: str | list[str] | None = None,
+ include: str | list[str] | None = None,
+ skip_covered: bool | None = None,
+ contexts: list[str] | None = None,
+ skip_empty: bool | None = None,
+ precision: int | None = None,
+ sort: str | None = None,
+ output_format: str | None = None,
) -> float:
"""Write a textual summary report to `file`.
@@ -1089,12 +1089,12 @@ def report(
def annotate(
self,
- morfs: Optional[Iterable[TMorf]] = None,
- directory: Optional[str] = None,
- ignore_errors: Optional[bool] = None,
- omit: Optional[Union[str, List[str]]] = None,
- include: Optional[Union[str, List[str]]] = None,
- contexts: Optional[List[str]] = None,
+ morfs: Iterable[TMorf] | None = None,
+ directory: str | None = None,
+ ignore_errors: bool | None = None,
+ omit: str | list[str] | None = None,
+ include: str | list[str] | None = None,
+ contexts: list[str] | None = None,
) -> None:
"""Annotate a list of modules.
@@ -1125,18 +1125,18 @@ def annotate(
def html_report(
self,
- morfs: Optional[Iterable[TMorf]] = None,
- directory: Optional[str] = None,
- ignore_errors: Optional[bool] = None,
- omit: Optional[Union[str, List[str]]] = None,
- include: Optional[Union[str, List[str]]] = None,
- extra_css: Optional[str] = None,
- title: Optional[str] = None,
- skip_covered: Optional[bool] = None,
- show_contexts: Optional[bool] = None,
- contexts: Optional[List[str]] = None,
- skip_empty: Optional[bool] = None,
- precision: Optional[int] = None,
+ morfs: Iterable[TMorf] | None = None,
+ directory: str | None = None,
+ ignore_errors: bool | None = None,
+ omit: str | list[str] | None = None,
+ include: str | list[str] | None = None,
+ extra_css: str | None = None,
+ title: str | None = None,
+ skip_covered: bool | None = None,
+ show_contexts: bool | None = None,
+ contexts: list[str] | None = None,
+ skip_empty: bool | None = None,
+ precision: int | None = None,
) -> float:
"""Generate an HTML report.
@@ -1183,13 +1183,13 @@ def html_report(
def xml_report(
self,
- morfs: Optional[Iterable[TMorf]] = None,
- outfile: Optional[str] = None,
- ignore_errors: Optional[bool] = None,
- omit: Optional[Union[str, List[str]]] = None,
- include: Optional[Union[str, List[str]]] = None,
- contexts: Optional[List[str]] = None,
- skip_empty: Optional[bool] = None,
+ morfs: Iterable[TMorf] | None = None,
+ outfile: str | None = None,
+ ignore_errors: bool | None = None,
+ omit: str | list[str] | None = None,
+ include: str | list[str] | None = None,
+ contexts: list[str] | None = None,
+ skip_empty: bool | None = None,
) -> float:
"""Generate an XML report of coverage results.
@@ -1217,14 +1217,14 @@ def xml_report(
def json_report(
self,
- morfs: Optional[Iterable[TMorf]] = None,
- outfile: Optional[str] = None,
- ignore_errors: Optional[bool] = None,
- omit: Optional[Union[str, List[str]]] = None,
- include: Optional[Union[str, List[str]]] = None,
- contexts: Optional[List[str]] = None,
- pretty_print: Optional[bool] = None,
- show_contexts: Optional[bool] = None,
+ morfs: Iterable[TMorf] | None = None,
+ outfile: str | None = None,
+ ignore_errors: bool | None = None,
+ omit: str | list[str] | None = None,
+ include: str | list[str] | None = None,
+ contexts: list[str] | None = None,
+ pretty_print: bool | None = None,
+ show_contexts: bool | None = None,
) -> float:
"""Generate a JSON report of coverage results.
@@ -1255,12 +1255,12 @@ def json_report(
def lcov_report(
self,
- morfs: Optional[Iterable[TMorf]] = None,
- outfile: Optional[str] = None,
- ignore_errors: Optional[bool] = None,
- omit: Optional[Union[str, List[str]]] = None,
- include: Optional[Union[str, List[str]]] = None,
- contexts: Optional[List[str]] = None,
+ morfs: Iterable[TMorf] | None = None,
+ outfile: str | None = None,
+ ignore_errors: bool | None = None,
+ omit: str | list[str] | None = None,
+ include: str | list[str] | None = None,
+ contexts: list[str] | None = None,
) -> float:
"""Generate an LCOV report of coverage results.
@@ -1282,7 +1282,7 @@ def lcov_report(
):
return render_report(self.config.lcov_output, LcovReporter(self), morfs, self._message)
- def sys_info(self) -> Iterable[Tuple[str, Any]]:
+ def sys_info(self) -> Iterable[tuple[str, Any]]:
"""Return a list of (key, value) pairs showing internal information."""
import coverage as covmod
@@ -1290,7 +1290,7 @@ def sys_info(self) -> Iterable[Tuple[str, Any]]:
self._init()
self._post_init()
- def plugin_info(plugins: List[Any]) -> List[str]:
+ def plugin_info(plugins: list[Any]) -> list[str]:
"""Make an entry for the sys_info from a list of plug-ins."""
entries = []
for plugin in plugins:
@@ -1347,7 +1347,7 @@ def plugin_info(plugins: List[Any]) -> List[str]:
)(Coverage)
-def process_startup() -> Optional[Coverage]:
+def process_startup() -> Coverage | None:
"""Call this at Python start-up to perhaps measure coverage.
If the environment variable COVERAGE_PROCESS_START is defined, coverage
diff --git a/coverage/data.py b/coverage/data.py
index 0868173b6..0db07d156 100644
--- a/coverage/data.py
+++ b/coverage/data.py
@@ -16,7 +16,7 @@
import hashlib
import os.path
-from typing import Callable, Dict, Iterable, List, Optional
+from typing import Callable, Iterable
from coverage.exceptions import CoverageException, NoDataError
from coverage.files import PathAliases
@@ -24,7 +24,7 @@
from coverage.sqldata import CoverageData
-def line_counts(data: CoverageData, fullpath: bool = False) -> Dict[str, int]:
+def line_counts(data: CoverageData, fullpath: bool = False) -> dict[str, int]:
"""Return a dict summarizing the line coverage data.
Keys are based on the file names, and values are the number of executed
@@ -63,7 +63,7 @@ def add_data_to_hash(data: CoverageData, filename: str, hasher: Hasher) -> None:
hasher.update(data.file_tracer(filename))
-def combinable_files(data_file: str, data_paths: Optional[Iterable[str]] = None) -> List[str]:
+def combinable_files(data_file: str, data_paths: Iterable[str] | None = None) -> list[str]:
"""Make a list of data files to be combined.
`data_file` is a path to a data file. `data_paths` is a list of files or
@@ -93,11 +93,11 @@ def combinable_files(data_file: str, data_paths: Optional[Iterable[str]] = None)
def combine_parallel_data(
data: CoverageData,
- aliases: Optional[PathAliases] = None,
- data_paths: Optional[Iterable[str]] = None,
+ aliases: PathAliases | None = None,
+ data_paths: Iterable[str] | None = None,
strict: bool = False,
keep: bool = False,
- message: Optional[Callable[[str], None]] = None,
+ message: Callable[[str], None] | None = None,
) -> None:
"""Combine a number of data files together.
@@ -212,7 +212,7 @@ def debug_data_file(filename: str) -> None:
print(line)
-def sorted_lines(data: CoverageData, filename: str) -> List[int]:
+def sorted_lines(data: CoverageData, filename: str) -> list[int]:
"""Get the sorted lines for a file, for tests."""
lines = data.lines(filename)
return sorted(lines or [])
diff --git a/coverage/debug.py b/coverage/debug.py
index 0895c570f..e4bed8b42 100644
--- a/coverage/debug.py
+++ b/coverage/debug.py
@@ -21,7 +21,7 @@
from typing import (
overload,
- Any, Callable, IO, Iterable, Iterator, Mapping, Optional, List, Tuple,
+ Any, Callable, IO, Iterable, Iterator, Mapping,
)
from coverage.misc import human_sorted_items, isolate_module
@@ -33,7 +33,7 @@
# When debugging, it can be helpful to force some options, especially when
# debugging the configuration mechanisms you usually use to control debugging!
# This is a list of forced debugging options.
-FORCED_DEBUG: List[str] = []
+FORCED_DEBUG: list[str] = []
FORCED_DEBUG_FILE = None
@@ -45,8 +45,8 @@ class DebugControl:
def __init__(
self,
options: Iterable[str],
- output: Optional[IO[str]],
- file_name: Optional[str] = None,
+ output: IO[str] | None,
+ file_name: str | None = None,
) -> None:
"""Configure the options and output file for debugging."""
self.options = list(options) + FORCED_DEBUG
@@ -87,7 +87,7 @@ def without_callers(self) -> Iterator[None]:
finally:
self.suppress_callers = old
- def write(self, msg: str, *, exc: Optional[BaseException] = None) -> None:
+ def write(self, msg: str, *, exc: BaseException | None = None) -> None:
"""Write a line of debug output.
`msg` is the line to write. A newline will be appended.
@@ -118,7 +118,7 @@ def should(self, option: str) -> bool:
"""Should we write debug messages? Never."""
return False
- def write(self, msg: str, *, exc: Optional[BaseException] = None) -> None:
+ def write(self, msg: str, *, exc: BaseException | None = None) -> None:
"""This will never be called."""
raise AssertionError("NoDebugging.write should never be called.")
@@ -128,7 +128,7 @@ def info_header(label: str) -> str:
return "--{:-<60s}".format(" "+label+" ")
-def info_formatter(info: Iterable[Tuple[str, Any]]) -> Iterator[str]:
+def info_formatter(info: Iterable[tuple[str, Any]]) -> Iterator[str]:
"""Produce a sequence of formatted lines from info.
`info` is a sequence of pairs (label, data). The produced lines are
@@ -158,7 +158,7 @@ def info_formatter(info: Iterable[Tuple[str, Any]]) -> Iterator[str]:
def write_formatted_info(
write: Callable[[str], None],
header: str,
- info: Iterable[Tuple[str, Any]],
+ info: Iterable[tuple[str, Any]],
) -> None:
"""Write a sequence of (label,data) pairs nicely.
@@ -179,10 +179,10 @@ def exc_one_line(exc: Exception) -> str:
return "|".join(l.rstrip() for l in lines)
-_FILENAME_REGEXES: List[Tuple[str, str]] = [
+_FILENAME_REGEXES: list[tuple[str, str]] = [
(r".*[/\\]pytest-of-.*[/\\]pytest-\d+([/\\]popen-gw\d+)?", "tmp:"),
]
-_FILENAME_SUBS: List[Tuple[str, str]] = []
+_FILENAME_SUBS: list[tuple[str, str]] = []
@overload
def short_filename(filename: str) -> str:
@@ -192,7 +192,7 @@ def short_filename(filename: str) -> str:
def short_filename(filename: None) -> None:
pass
-def short_filename(filename: Optional[str]) -> Optional[str]:
+def short_filename(filename: str | None) -> str | None:
"""Shorten a file name. Directories are replaced by prefixes like 'syspath:'"""
if not _FILENAME_SUBS:
for pathdir in sys.path:
@@ -350,7 +350,7 @@ def filter_text(text: str, filters: Iterable[Callable[[str], str]]) -> str:
class CwdTracker:
"""A class to add cwd info to debug messages."""
def __init__(self) -> None:
- self.cwd: Optional[str] = None
+ self.cwd: str | None = None
def filter(self, text: str) -> str:
"""Add a cwd message for each new cwd."""
@@ -393,7 +393,7 @@ def filter(self, text: str) -> str:
class PytestTracker:
"""Track the current pytest test name to add to debug messages."""
def __init__(self) -> None:
- self.test_name: Optional[str] = None
+ self.test_name: str | None = None
def filter(self, text: str) -> str:
"""Add a message when the pytest test changes."""
@@ -408,7 +408,7 @@ class DebugOutputFile:
"""A file-like object that includes pid and cwd information."""
def __init__(
self,
- outfile: Optional[IO[str]],
+ outfile: IO[str] | None,
filters: Iterable[Callable[[str], str]],
):
self.outfile = outfile
@@ -418,8 +418,8 @@ def __init__(
@classmethod
def get_one(
cls,
- fileobj: Optional[IO[str]] = None,
- file_name: Optional[str] = None,
+ fileobj: IO[str] | None = None,
+ file_name: str | None = None,
filters: Iterable[Callable[[str], str]] = (),
interim: bool = False,
) -> DebugOutputFile:
@@ -478,7 +478,7 @@ def _set_singleton_data(cls, the_one: DebugOutputFile, interim: bool) -> None:
sys.modules[cls.SYS_MOD_NAME] = singleton_module
@classmethod
- def _get_singleton_data(cls) -> Tuple[Optional[DebugOutputFile], bool]:
+ def _get_singleton_data(cls) -> tuple[DebugOutputFile | None, bool]:
"""Get the one DebugOutputFile."""
singleton_module = sys.modules.get(cls.SYS_MOD_NAME)
return getattr(singleton_module, cls.SINGLETON_ATTR, (None, True))
@@ -582,7 +582,7 @@ def _wrapper(self: Any, *args: Any, **kwargs: Any) -> Any:
return _decorator
-def relevant_environment_display(env: Mapping[str, str]) -> List[Tuple[str, str]]:
+def relevant_environment_display(env: Mapping[str, str]) -> list[tuple[str, str]]:
"""Filter environment variables for a debug display.
Select variables to display (with COV or PY in the name, or HOME, TEMP, or
diff --git a/coverage/disposition.py b/coverage/disposition.py
index 3cc6c8d68..7aa15e97a 100644
--- a/coverage/disposition.py
+++ b/coverage/disposition.py
@@ -5,7 +5,7 @@
from __future__ import annotations
-from typing import Optional, Type, TYPE_CHECKING
+from typing import TYPE_CHECKING
from coverage.types import TFileDisposition
@@ -18,10 +18,10 @@ class FileDisposition:
original_filename: str
canonical_filename: str
- source_filename: Optional[str]
+ source_filename: str | None
trace: bool
reason: str
- file_tracer: Optional[FileTracer]
+ file_tracer: FileTracer | None
has_dynamic_filename: bool
def __repr__(self) -> str:
@@ -32,7 +32,7 @@ def __repr__(self) -> str:
# be implemented in either C or Python. Acting on them is done with these
# functions.
-def disposition_init(cls: Type[TFileDisposition], original_filename: str) -> TFileDisposition:
+def disposition_init(cls: type[TFileDisposition], original_filename: str) -> TFileDisposition:
"""Construct and initialize a new FileDisposition object."""
disp = cls()
disp.original_filename = original_filename
diff --git a/coverage/env.py b/coverage/env.py
index b6b9caca3..063f9d15b 100644
--- a/coverage/env.py
+++ b/coverage/env.py
@@ -9,7 +9,7 @@
import platform
import sys
-from typing import Any, Iterable, Tuple
+from typing import Any, Iterable
# debug_info() at the bottom wants to show all the globals, but not imports.
# Grab the global names here to know which names to not show. Nothing defined
@@ -134,7 +134,7 @@ class PYBEHAVIOR:
TESTING = os.getenv("COVERAGE_TESTING") == "True"
-def debug_info() -> Iterable[Tuple[str, Any]]:
+def debug_info() -> Iterable[tuple[str, Any]]:
"""Return a list of (name, value) pairs for printing debug information."""
info = [
(name, value) for name, value in globals().items()
diff --git a/coverage/execfile.py b/coverage/execfile.py
index 966fc680c..7011c70f9 100644
--- a/coverage/execfile.py
+++ b/coverage/execfile.py
@@ -15,7 +15,7 @@
from importlib.machinery import ModuleSpec
from types import CodeType, ModuleType
-from typing import Any, List, Optional, Tuple
+from typing import Any
from coverage import env
from coverage.exceptions import CoverageException, _ExceptionDuringRun, NoCode, NoSource
@@ -39,7 +39,7 @@ def __init__(self, fullname: str, *_args: Any) -> None:
def find_module(
modulename: str,
-) -> Tuple[Optional[str], str, ModuleSpec]:
+) -> tuple[str | None, str, ModuleSpec]:
"""Find the module named `modulename`.
Returns the file path of the module, the name of the enclosing
@@ -73,23 +73,23 @@ class PyRunner:
This is meant to emulate real Python execution as closely as possible.
"""
- def __init__(self, args: List[str], as_module: bool = False) -> None:
+ def __init__(self, args: list[str], as_module: bool = False) -> None:
self.args = args
self.as_module = as_module
self.arg0 = args[0]
- self.package: Optional[str] = None
- self.modulename: Optional[str] = None
- self.pathname: Optional[str] = None
- self.loader: Optional[DummyLoader] = None
- self.spec: Optional[ModuleSpec] = None
+ self.package: str | None = None
+ self.modulename: str | None = None
+ self.pathname: str | None = None
+ self.loader: DummyLoader | None = None
+ self.spec: ModuleSpec | None = None
def prepare(self) -> None:
"""Set sys.path properly.
This needs to happen before any importing, and without importing anything.
"""
- path0: Optional[str]
+ path0: str | None
if self.as_module:
path0 = os.getcwd()
elif os.path.isdir(self.arg0):
@@ -257,7 +257,7 @@ def run(self) -> None:
os.chdir(cwd)
-def run_python_module(args: List[str]) -> None:
+def run_python_module(args: list[str]) -> None:
"""Run a Python module, as though with ``python -m name args...``.
`args` is the argument array to present as sys.argv, including the first
@@ -271,7 +271,7 @@ def run_python_module(args: List[str]) -> None:
runner.run()
-def run_python_file(args: List[str]) -> None:
+def run_python_file(args: list[str]) -> None:
"""Run a Python file as if it were the main program on the command line.
`args` is the argument array to present as sys.argv, including the first
diff --git a/coverage/files.py b/coverage/files.py
index 9ae17a58a..71352b8eb 100644
--- a/coverage/files.py
+++ b/coverage/files.py
@@ -13,7 +13,7 @@
import re
import sys
-from typing import Callable, Dict, Iterable, List, Optional, Tuple
+from typing import Callable, Iterable
from coverage import env
from coverage.exceptions import ConfigError
@@ -24,7 +24,7 @@
RELATIVE_DIR: str = ""
-CANONICAL_FILENAME_CACHE: Dict[str, str] = {}
+CANONICAL_FILENAME_CACHE: dict[str, str] = {}
def set_relative_directory() -> None:
"""Set the directory that `relative_filename` will be relative to."""
@@ -110,8 +110,8 @@ def flat_rootname(filename: str) -> str:
if env.WINDOWS:
- _ACTUAL_PATH_CACHE: Dict[str, str] = {}
- _ACTUAL_PATH_LIST_CACHE: Dict[str, List[str]] = {}
+ _ACTUAL_PATH_CACHE: dict[str, str] = {}
+ _ACTUAL_PATH_LIST_CACHE: dict[str, list[str]] = {}
def actual_path(path: str) -> str:
"""Get the actual path of `path`, including the correct case."""
@@ -156,7 +156,7 @@ def abs_file(path: str) -> str:
return actual_path(os.path.abspath(os.path.realpath(path)))
-def zip_location(filename: str) -> Optional[Tuple[str, str]]:
+def zip_location(filename: str) -> tuple[str, str] | None:
"""Split a filename into a zipfile / inner name pair.
Only return a pair if the zipfile exists. No check is made if the inner
@@ -197,7 +197,7 @@ def isabs_anywhere(filename: str) -> bool:
return ntpath.isabs(filename) or posixpath.isabs(filename)
-def prep_patterns(patterns: Iterable[str]) -> List[str]:
+def prep_patterns(patterns: Iterable[str]) -> list[str]:
"""Prepare the file patterns for use in a `GlobMatcher`.
If a pattern starts with a wildcard, it is used as a pattern
@@ -224,7 +224,7 @@ class TreeMatcher:
"""
def __init__(self, paths: Iterable[str], name: str = "unknown") -> None:
- self.original_paths: List[str] = human_sorted(paths)
+ self.original_paths: list[str] = human_sorted(paths)
#self.paths = list(map(os.path.normcase, paths))
self.paths = [os.path.normcase(p) for p in paths]
self.name = name
@@ -232,7 +232,7 @@ def __init__(self, paths: Iterable[str], name: str = "unknown") -> None:
def __repr__(self) -> str:
return f"<TreeMatcher {self.original_paths!r}>"
- def info(self) -> List[str]:
+ def info(self) -> list[str]:
"""A list of strings for displaying when dumping state."""
return self.original_paths
@@ -259,7 +259,7 @@ def __init__(self, module_names: Iterable[str], name:str = "unknown") -> None:
def __repr__(self) -> str:
return f"<ModuleMatcher {self.modules!r}>"
- def info(self) -> List[str]:
+ def info(self) -> list[str]:
"""A list of strings for displaying when dumping state."""
return self.modules
@@ -289,7 +289,7 @@ def __init__(self, pats: Iterable[str], name: str = "unknown") -> None:
def __repr__(self) -> str:
return f"<GlobMatcher {self.pats!r}>"
- def info(self) -> List[str]:
+ def info(self) -> list[str]:
"""A list of strings for displaying when dumping state."""
return self.pats
@@ -389,11 +389,11 @@ class PathAliases:
"""
def __init__(
self,
- debugfn: Optional[Callable[[str], None]] = None,
+ debugfn: Callable[[str], None] | None = None,
relative: bool = False,
) -> None:
# A list of (original_pattern, regex, result)
- self.aliases: List[Tuple[str, re.Pattern[str], str]] = []
+ self.aliases: list[tuple[str, re.Pattern[str], str]] = []
self.debugfn = debugfn or (lambda msg: 0)
self.relative = relative
self.pprinted = False
diff --git a/coverage/html.py b/coverage/html.py
index 1db62b3e0..e2bae1d6b 100644
--- a/coverage/html.py
+++ b/coverage/html.py
@@ -15,7 +15,7 @@
import string
from dataclasses import dataclass
-from typing import Any, Dict, Iterable, List, Optional, Tuple, TYPE_CHECKING, cast
+from typing import Any, Iterable, TYPE_CHECKING, cast
import coverage
from coverage.data import CoverageData, add_data_to_hash
@@ -77,19 +77,19 @@ def write_html(fname: str, html: str) -> None:
@dataclass
class LineData:
"""The data for each source line of HTML output."""
- tokens: List[Tuple[str, str]]
+ tokens: list[tuple[str, str]]
number: TLineNo
category: str
statement: bool
- contexts: List[str]
+ contexts: list[str]
contexts_label: str
- context_list: List[str]
- short_annotations: List[str]
- long_annotations: List[str]
+ context_list: list[str]
+ short_annotations: list[str]
+ long_annotations: list[str]
html: str = ""
- context_str: Optional[str] = None
- annotate: Optional[str] = None
- annotate_long: Optional[str] = None
+ context_str: str | None = None
+ annotate: str | None = None
+ annotate_long: str | None = None
css_class: str = ""
@@ -98,7 +98,7 @@ class FileData:
"""The data for each source file of HTML output."""
relative_filename: str
nums: Numbers
- lines: List[LineData]
+ lines: list[LineData]
class HtmlDataGeneration:
@@ -233,7 +233,7 @@ def __init__(self, cov: Coverage) -> None:
title = self.config.html_title
- self.extra_css: Optional[str]
+ self.extra_css: str | None
if self.config.extra_css:
self.extra_css = os.path.basename(self.config.extra_css)
else:
@@ -242,8 +242,8 @@ def __init__(self, cov: Coverage) -> None:
self.data = self.coverage.get_data()
self.has_arcs = self.data.has_arcs()
- self.file_summaries: List[IndexInfoDict] = []
- self.all_files_nums: List[Numbers] = []
+ self.file_summaries: list[IndexInfoDict] = []
+ self.all_files_nums: list[Numbers] = []
self.incr = IncrementalChecker(self.directory)
self.datagen = HtmlDataGeneration(self.coverage)
self.totals = Numbers(precision=self.config.precision)
@@ -278,7 +278,7 @@ def __init__(self, cov: Coverage) -> None:
self.pyfile_html_source = read_data("pyfile.html")
self.source_tmpl = Templite(self.pyfile_html_source, self.template_globals)
- def report(self, morfs: Optional[Iterable[TMorf]]) -> float:
+ def report(self, morfs: Iterable[TMorf] | None) -> float:
"""Generate an HTML report for `morfs`.
`morfs` is a list of modules or file names.
@@ -546,7 +546,7 @@ def __init__(self, directory: str) -> None:
def reset(self) -> None:
"""Initialize to empty. Causes all files to be reported."""
self.globals = ""
- self.files: Dict[str, FileInfoDict] = {}
+ self.files: dict[str, FileInfoDict] = {}
def read(self) -> None:
"""Read the information we stored last time."""
@@ -651,6 +651,6 @@ def escape(t: str) -> str:
    return t.replace("&", "&amp;").replace("<", "&lt;")
-def pair(ratio: Tuple[int, int]) -> str:
+def pair(ratio: tuple[int, int]) -> str:
"""Format a pair of numbers so JavaScript can read them in an attribute."""
- return "%s %s" % ratio
+ return "{} {}".format(*ratio)
diff --git a/coverage/inorout.py b/coverage/inorout.py
index 4359570c2..5ea29edf1 100644
--- a/coverage/inorout.py
+++ b/coverage/inorout.py
@@ -17,7 +17,7 @@
from types import FrameType, ModuleType
from typing import (
- cast, Any, Iterable, List, Optional, Set, Tuple, Type, TYPE_CHECKING,
+ cast, Any, Iterable, TYPE_CHECKING,
)
from coverage import env
@@ -38,7 +38,7 @@
# when deciding where the stdlib is. These modules are not used for anything,
# they are modules importable from the pypy lib directories, so that we can
# find those directories.
-modules_we_happen_to_have: List[ModuleType] = [
+modules_we_happen_to_have: list[ModuleType] = [
inspect, itertools, os, platform, re, sysconfig, traceback,
]
@@ -70,7 +70,7 @@ def canonical_path(morf: TMorf, directory: bool = False) -> str:
return morf_path
-def name_for_module(filename: str, frame: Optional[FrameType]) -> str:
+def name_for_module(filename: str, frame: FrameType | None) -> str:
"""Get the name of the module for a filename and frame.
For configurability's sake, we allow __main__ modules to be matched by
@@ -117,7 +117,7 @@ def module_has_file(mod: ModuleType) -> bool:
return os.path.exists(mod__file__)
-def file_and_path_for_module(modulename: str) -> Tuple[Optional[str], List[str]]:
+def file_and_path_for_module(modulename: str) -> tuple[str | None, list[str]]:
"""Find the file and search path for `modulename`.
Returns:
@@ -138,7 +138,7 @@ def file_and_path_for_module(modulename: str) -> Tuple[Optional[str], List[str]]
return filename, path
-def add_stdlib_paths(paths: Set[str]) -> None:
+def add_stdlib_paths(paths: set[str]) -> None:
"""Add paths where the stdlib can be found to the set `paths`."""
# Look at where some standard modules are located. That's the
# indication for "installed with the interpreter". In some
@@ -150,7 +150,7 @@ def add_stdlib_paths(paths: Set[str]) -> None:
paths.add(canonical_path(m, directory=True))
-def add_third_party_paths(paths: Set[str]) -> None:
+def add_third_party_paths(paths: set[str]) -> None:
"""Add locations for third-party packages to the set `paths`."""
# Get the paths that sysconfig knows about.
scheme_names = set(sysconfig.get_scheme_names())
@@ -164,7 +164,7 @@ def add_third_party_paths(paths: Set[str]) -> None:
paths.add(config_paths[path_name])
-def add_coverage_paths(paths: Set[str]) -> None:
+def add_coverage_paths(paths: set[str]) -> None:
"""Add paths where coverage.py code can be found to the set `paths`."""
cover_path = canonical_path(__file__, directory=True)
paths.add(cover_path)
@@ -180,15 +180,15 @@ def __init__(
self,
config: CoverageConfig,
warn: TWarnFn,
- debug: Optional[TDebugCtl],
+ debug: TDebugCtl | None,
include_namespace_packages: bool,
) -> None:
self.warn = warn
self.debug = debug
self.include_namespace_packages = include_namespace_packages
- self.source: List[str] = []
- self.source_pkgs: List[str] = []
+ self.source: list[str] = []
+ self.source_pkgs: list[str] = []
self.source_pkgs.extend(config.source_pkgs)
for src in config.source or []:
if os.path.isdir(src):
@@ -201,17 +201,17 @@ def __init__(
self.omit = prep_patterns(config.run_omit)
# The directories for files considered "installed with the interpreter".
- self.pylib_paths: Set[str] = set()
+ self.pylib_paths: set[str] = set()
if not config.cover_pylib:
add_stdlib_paths(self.pylib_paths)
# To avoid tracing the coverage.py code itself, we skip anything
# located where we are.
- self.cover_paths: Set[str] = set()
+ self.cover_paths: set[str] = set()
add_coverage_paths(self.cover_paths)
# Find where third-party packages are installed.
- self.third_paths: Set[str] = set()
+ self.third_paths: set[str] = set()
add_third_party_paths(self.third_paths)
def _debug(msg: str) -> None:
@@ -289,9 +289,9 @@ def _debug(msg: str) -> None:
_debug(f"Source in third-party matching: {self.source_in_third_match}")
self.plugins: Plugins
- self.disp_class: Type[TFileDisposition] = FileDisposition
+ self.disp_class: type[TFileDisposition] = FileDisposition
- def should_trace(self, filename: str, frame: Optional[FrameType] = None) -> TFileDisposition:
+ def should_trace(self, filename: str, frame: FrameType | None = None) -> TFileDisposition:
"""Decide whether to trace execution in `filename`, with a reason.
This function is called from the trace function. As each new file name
@@ -388,7 +388,7 @@ def nope(disp: TFileDisposition, reason: str) -> TFileDisposition:
return disp
- def check_include_omit_etc(self, filename: str, frame: Optional[FrameType]) -> Optional[str]:
+ def check_include_omit_etc(self, filename: str, frame: FrameType | None) -> str | None:
"""Check a file name against the include, omit, etc, rules.
Returns a string or None. String means, don't trace, and is the reason
@@ -518,7 +518,7 @@ def _warn_about_unmeasured_code(self, pkg: str) -> None:
msg = f"Module {pkg} was previously imported, but not measured"
self.warn(msg, slug="module-not-measured")
- def find_possibly_unexecuted_files(self) -> Iterable[Tuple[str, Optional[str]]]:
+ def find_possibly_unexecuted_files(self) -> Iterable[tuple[str, str | None]]:
"""Find files in the areas of interest that might be untraced.
Yields pairs: file path, and responsible plug-in name.
@@ -533,13 +533,13 @@ def find_possibly_unexecuted_files(self) -> Iterable[Tuple[str, Optional[str]]]:
for src in self.source:
yield from self._find_executable_files(src)
- def _find_plugin_files(self, src_dir: str) -> Iterable[Tuple[str, str]]:
+ def _find_plugin_files(self, src_dir: str) -> Iterable[tuple[str, str]]:
"""Get executable files from the plugins."""
for plugin in self.plugins.file_tracers:
for x_file in plugin.find_executable_files(src_dir):
yield x_file, plugin._coverage_plugin_name
- def _find_executable_files(self, src_dir: str) -> Iterable[Tuple[str, Optional[str]]]:
+ def _find_executable_files(self, src_dir: str) -> Iterable[tuple[str, str | None]]:
"""Find executable files in `src_dir`.
Search for files in `src_dir` that can be executed because they
@@ -563,7 +563,7 @@ def _find_executable_files(self, src_dir: str) -> Iterable[Tuple[str, Optional[s
continue
yield file_path, plugin_name
- def sys_info(self) -> Iterable[Tuple[str, Any]]:
+ def sys_info(self) -> Iterable[tuple[str, Any]]:
"""Our information for Coverage.sys_info.
Returns a list of (key, value) pairs.
diff --git a/coverage/jsonreport.py b/coverage/jsonreport.py
index 2a0b9c647..4d4dec7d8 100644
--- a/coverage/jsonreport.py
+++ b/coverage/jsonreport.py
@@ -9,7 +9,7 @@
import json
import sys
-from typing import Any, Dict, IO, Iterable, List, Optional, Tuple, TYPE_CHECKING
+from typing import Any, IO, Iterable, TYPE_CHECKING
from coverage import __version__
from coverage.report_core import get_analysis_to_report
@@ -34,9 +34,9 @@ def __init__(self, coverage: Coverage) -> None:
self.coverage = coverage
self.config = self.coverage.config
self.total = Numbers(self.config.precision)
- self.report_data: Dict[str, Any] = {}
+ self.report_data: dict[str, Any] = {}
- def report(self, morfs: Optional[Iterable[TMorf]], outfile: IO[str]) -> float:
+ def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str]) -> float:
"""Generate a json report for `morfs`.
`morfs` is a list of modules or file names.
@@ -89,7 +89,7 @@ def report(self, morfs: Optional[Iterable[TMorf]], outfile: IO[str]) -> float:
return self.total.n_statements and self.total.pc_covered
- def report_one_file(self, coverage_data: CoverageData, analysis: Analysis) -> Dict[str, Any]:
+ def report_one_file(self, coverage_data: CoverageData, analysis: Analysis) -> dict[str, Any]:
"""Extract the relevant report data for a single file."""
nums = analysis.numbers
self.total += nums
@@ -126,8 +126,8 @@ def report_one_file(self, coverage_data: CoverageData, analysis: Analysis) -> Di
def _convert_branch_arcs(
- branch_arcs: Dict[TLineNo, List[TLineNo]],
-) -> Iterable[Tuple[TLineNo, TLineNo]]:
+ branch_arcs: dict[TLineNo, list[TLineNo]],
+) -> Iterable[tuple[TLineNo, TLineNo]]:
"""Convert branch arcs to a list of two-element tuples."""
for source, targets in branch_arcs.items():
for target in targets:
diff --git a/coverage/lcovreport.py b/coverage/lcovreport.py
index 33eb39871..e54bae8c8 100644
--- a/coverage/lcovreport.py
+++ b/coverage/lcovreport.py
@@ -9,7 +9,7 @@
import hashlib
import sys
-from typing import IO, Iterable, Optional, TYPE_CHECKING
+from typing import IO, Iterable, TYPE_CHECKING
from coverage.plugin import FileReporter
from coverage.report_core import get_analysis_to_report
@@ -35,7 +35,7 @@ def __init__(self, coverage: Coverage) -> None:
self.coverage = coverage
self.total = Numbers(self.coverage.config.precision)
- def report(self, morfs: Optional[Iterable[TMorf]], outfile: IO[str]) -> float:
+ def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str]) -> float:
"""Renders the full lcov report.
`morfs` is a list of modules or filenames
diff --git a/coverage/misc.py b/coverage/misc.py
index 6175d397e..2b27efc99 100644
--- a/coverage/misc.py
+++ b/coverage/misc.py
@@ -21,8 +21,7 @@
from types import ModuleType
from typing import (
- Any, Callable, Dict, IO, Iterable, Iterator, List, Mapping, NoReturn, Optional,
- Sequence, Tuple, TypeVar, Union,
+ Any, Callable, IO, Iterable, Iterator, Mapping, NoReturn, Sequence, TypeVar,
)
from coverage import env
@@ -34,7 +33,7 @@
# pylint: disable=unused-wildcard-import
from coverage.exceptions import * # pylint: disable=wildcard-import
-ISOLATED_MODULES: Dict[ModuleType, ModuleType] = {}
+ISOLATED_MODULES: dict[ModuleType, ModuleType] = {}
def isolate_module(mod: ModuleType) -> ModuleType:
@@ -80,7 +79,7 @@ def sys_modules_saved() -> Iterator[None]:
saver.restore()
-def import_third_party(modname: str) -> Tuple[ModuleType, bool]:
+def import_third_party(modname: str) -> tuple[ModuleType, bool]:
"""Import a third-party module we need, but might not be installed.
This also cleans out the module after the import, so that coverage won't
@@ -140,7 +139,7 @@ def _wrapper(self: TSelf) -> TRetVal:
return fn # pragma: not testing
-def bool_or_none(b: Any) -> Optional[bool]:
+def bool_or_none(b: Any) -> bool | None:
"""Return bool(b), but preserve None."""
if b is None:
return None
@@ -180,7 +179,7 @@ def ensure_dir_for_file(path: str) -> None:
ensure_dir(os.path.dirname(path))
-def output_encoding(outfile: Optional[IO[str]] = None) -> str:
+def output_encoding(outfile: IO[str] | None = None) -> str:
"""Determine the encoding to use for output written to `outfile` or stdout."""
if outfile is None:
outfile = sys.stdout
@@ -318,7 +317,7 @@ def format_local_datetime(dt: datetime.datetime) -> str:
return dt.astimezone().strftime("%Y-%m-%d %H:%M %z")
-def import_local_file(modname: str, modfile: Optional[str] = None) -> ModuleType:
+def import_local_file(modname: str, modfile: str | None = None) -> ModuleType:
"""Import a local file as a module.
Opens a file in the current directory named `modname`.py, imports it
@@ -338,7 +337,7 @@ def import_local_file(modname: str, modfile: Optional[str] = None) -> ModuleType
return mod
-def _human_key(s: str) -> Tuple[List[Union[str, int]], str]:
+def _human_key(s: str) -> tuple[list[str | int], str]:
"""Turn a string into a list of string and number chunks.
"z23a" -> (["z", 23, "a"], "z23a")
@@ -346,7 +345,7 @@ def _human_key(s: str) -> Tuple[List[Union[str, int]], str]:
The original string is appended as a last value to ensure the
key is unique enough so that "x1y" and "x001y" can be distinguished.
"""
- def tryint(s: str) -> Union[str, int]:
+ def tryint(s: str) -> str | int:
"""If `s` is a number, return an int, else `s` unchanged."""
try:
return int(s)
@@ -355,7 +354,7 @@ def tryint(s: str) -> Union[str, int]:
return ([tryint(c) for c in re.split(r"(\d+)", s)], s)
-def human_sorted(strings: Iterable[str]) -> List[str]:
+def human_sorted(strings: Iterable[str]) -> list[str]:
"""Sort the given iterable of strings the way that humans expect.
Numeric components in the strings are sorted as numbers.
@@ -370,7 +369,7 @@ def human_sorted(strings: Iterable[str]) -> List[str]:
def human_sorted_items(
items: Iterable[SortableItem],
reverse: bool = False,
-) -> List[SortableItem]:
+) -> list[SortableItem]:
"""Sort (string, ...) items the way humans expect.
The elements of `items` can be any tuple/list. They'll be sorted by the
diff --git a/coverage/multiproc.py b/coverage/multiproc.py
index ab2bc4a17..6d5a82737 100644
--- a/coverage/multiproc.py
+++ b/coverage/multiproc.py
@@ -12,7 +12,7 @@
import sys
import traceback
-from typing import Any, Dict, Optional
+from typing import Any
from coverage.debug import DebugControl
@@ -29,7 +29,7 @@ class ProcessWithCoverage(OriginalProcess): # pylint: disable=abstract-m
def _bootstrap(self, *args, **kwargs): # type: ignore[no-untyped-def]
"""Wrapper around _bootstrap to start coverage."""
- debug: Optional[DebugControl] = None
+ debug: DebugControl | None = None
try:
from coverage import Coverage # avoid circular import
cov = Coverage(data_suffix=True, auto_data=True)
@@ -66,10 +66,10 @@ class Stowaway:
def __init__(self, rcfile: str) -> None:
self.rcfile = rcfile
- def __getstate__(self) -> Dict[str, str]:
+ def __getstate__(self) -> dict[str, str]:
return {"rcfile": self.rcfile}
- def __setstate__(self, state: Dict[str, str]) -> None:
+ def __setstate__(self, state: dict[str, str]) -> None:
patch_multiprocessing(state["rcfile"])
@@ -104,7 +104,7 @@ def patch_multiprocessing(rcfile: str) -> None:
except (ImportError, AttributeError):
pass
else:
- def get_preparation_data_with_stowaway(name: str) -> Dict[str, Any]:
+ def get_preparation_data_with_stowaway(name: str) -> dict[str, Any]:
"""Get the original preparation data, and also insert our stowaway."""
d = original_get_preparation_data(name)
d["stowaway"] = Stowaway(rcfile)
diff --git a/coverage/numbits.py b/coverage/numbits.py
index 71b974de5..a4eedfa86 100644
--- a/coverage/numbits.py
+++ b/coverage/numbits.py
@@ -20,7 +20,7 @@
import sqlite3
from itertools import zip_longest
-from typing import Iterable, List
+from typing import Iterable
def nums_to_numbits(nums: Iterable[int]) -> bytes:
@@ -43,7 +43,7 @@ def nums_to_numbits(nums: Iterable[int]) -> bytes:
return bytes(b)
-def numbits_to_nums(numbits: bytes) -> List[int]:
+def numbits_to_nums(numbits: bytes) -> list[int]:
"""Convert a numbits into a list of numbers.
Arguments:
diff --git a/coverage/parser.py b/coverage/parser.py
index 3304ecab8..6cf73446e 100644
--- a/coverage/parser.py
+++ b/coverage/parser.py
@@ -37,9 +37,9 @@ class PythonParser:
"""
def __init__(
self,
- text: Optional[str] = None,
- filename: Optional[str] = None,
- exclude: Optional[str] = None,
+ text: str | None = None,
+ filename: str | None = None,
+ exclude: str | None = None,
) -> None:
"""
Source can be provided as `text`, the text itself, or `filename`, from
@@ -61,45 +61,45 @@ def __init__(
self.exclude = exclude
# The text lines of the parsed code.
- self.lines: List[str] = self.text.split("\n")
+ self.lines: list[str] = self.text.split("\n")
# The normalized line numbers of the statements in the code. Exclusions
# are taken into account, and statements are adjusted to their first
# lines.
- self.statements: Set[TLineNo] = set()
+ self.statements: set[TLineNo] = set()
# The normalized line numbers of the excluded lines in the code,
# adjusted to their first lines.
- self.excluded: Set[TLineNo] = set()
+ self.excluded: set[TLineNo] = set()
# The raw_* attributes are only used in this class, and in
# lab/parser.py to show how this class is working.
# The line numbers that start statements, as reported by the line
# number table in the bytecode.
- self.raw_statements: Set[TLineNo] = set()
+ self.raw_statements: set[TLineNo] = set()
# The raw line numbers of excluded lines of code, as marked by pragmas.
- self.raw_excluded: Set[TLineNo] = set()
+ self.raw_excluded: set[TLineNo] = set()
# The line numbers of class definitions.
- self.raw_classdefs: Set[TLineNo] = set()
+ self.raw_classdefs: set[TLineNo] = set()
# The line numbers of docstring lines.
- self.raw_docstrings: Set[TLineNo] = set()
+ self.raw_docstrings: set[TLineNo] = set()
# Internal detail, used by lab/parser.py.
self.show_tokens = False
# A dict mapping line numbers to lexical statement starts for
# multi-line statements.
- self._multiline: Dict[TLineNo, TLineNo] = {}
+ self._multiline: dict[TLineNo, TLineNo] = {}
# Lazily-created arc data, and missing arc descriptions.
- self._all_arcs: Optional[Set[TArc]] = None
- self._missing_arc_fragments: Optional[TArcFragments] = None
+ self._all_arcs: set[TArc] | None = None
+ self._missing_arc_fragments: TArcFragments | None = None
- def lines_matching(self, *regexes: str) -> Set[TLineNo]:
+ def lines_matching(self, *regexes: str) -> set[TLineNo]:
"""Find the lines matching one of a list of regexes.
Returns a set of line numbers, the lines that contain a match for one
@@ -239,7 +239,7 @@ def first_line(self, lineno: TLineNo) -> TLineNo:
lineno = self._multiline.get(lineno, lineno)
return lineno
- def first_lines(self, linenos: Iterable[TLineNo]) -> Set[TLineNo]:
+ def first_lines(self, linenos: Iterable[TLineNo]) -> set[TLineNo]:
"""Map the line numbers in `linenos` to the correct first line of the
statement.
@@ -248,11 +248,11 @@ def first_lines(self, linenos: Iterable[TLineNo]) -> Set[TLineNo]:
"""
return {self.first_line(l) for l in linenos}
- def translate_lines(self, lines: Iterable[TLineNo]) -> Set[TLineNo]:
+ def translate_lines(self, lines: Iterable[TLineNo]) -> set[TLineNo]:
"""Implement `FileReporter.translate_lines`."""
return self.first_lines(lines)
- def translate_arcs(self, arcs: Iterable[TArc]) -> Set[TArc]:
+ def translate_arcs(self, arcs: Iterable[TArc]) -> set[TArc]:
"""Implement `FileReporter.translate_arcs`."""
return {(self.first_line(a), self.first_line(b)) for (a, b) in arcs}
@@ -281,7 +281,7 @@ def parse_source(self) -> None:
starts = self.raw_statements - ignore
self.statements = self.first_lines(starts) - ignore
- def arcs(self) -> Set[TArc]:
+ def arcs(self) -> set[TArc]:
"""Get information about the arcs available in the code.
Returns a set of line number pairs. Line numbers have been normalized
@@ -311,13 +311,13 @@ def _analyze_ast(self) -> None:
self._missing_arc_fragments = aaa.missing_arc_fragments
- def exit_counts(self) -> Dict[TLineNo, int]:
+ def exit_counts(self) -> dict[TLineNo, int]:
"""Get a count of exits from that each line.
Excluded lines are excluded.
"""
- exit_counts: Dict[TLineNo, int] = collections.defaultdict(int)
+ exit_counts: dict[TLineNo, int] = collections.defaultdict(int)
for l1, l2 in self.arcs():
if l1 < 0:
# Don't ever report -1 as a line number
@@ -342,7 +342,7 @@ def missing_arc_description(
self,
start: TLineNo,
end: TLineNo,
- executed_arcs: Optional[Iterable[TArc]] = None,
+ executed_arcs: Iterable[TArc] | None = None,
) -> str:
"""Provide an English sentence describing a missing arc."""
if self._missing_arc_fragments is None:
@@ -390,8 +390,8 @@ class ByteParser:
def __init__(
self,
text: str,
- code: Optional[CodeType] = None,
- filename: Optional[str] = None,
+ code: CodeType | None = None,
+ filename: str | None = None,
) -> None:
self.text = text
if code is not None:
@@ -474,7 +474,7 @@ class ArcStart(collections.namedtuple("Arc", "lineno, cause")):
to have `lineno` interpolated into it.
"""
- def __new__(cls, lineno: TLineNo, cause: Optional[str] = None) -> ArcStart:
+ def __new__(cls, lineno: TLineNo, cause: str | None = None) -> ArcStart:
return super().__new__(cls, lineno, cause)
@@ -484,8 +484,8 @@ def __call__(
self,
start: TLineNo,
end: TLineNo,
- smsg: Optional[str] = None,
- emsg: Optional[str] = None,
+ smsg: str | None = None,
+ emsg: str | None = None,
) -> None:
...
@@ -501,23 +501,23 @@ class Block:
stack.
"""
# pylint: disable=unused-argument
- def process_break_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
+ def process_break_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool:
"""Process break exits."""
# Because break can only appear in loops, and most subclasses
# implement process_break_exits, this function is never reached.
raise AssertionError
- def process_continue_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
+ def process_continue_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool:
"""Process continue exits."""
# Because continue can only appear in loops, and most subclasses
# implement process_continue_exits, this function is never reached.
raise AssertionError
- def process_raise_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
+ def process_raise_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool:
"""Process raise exits."""
return False
- def process_return_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
+ def process_return_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool:
"""Process return exits."""
return False
@@ -528,13 +528,13 @@ def __init__(self, start: TLineNo) -> None:
# The line number where the loop starts.
self.start = start
# A set of ArcStarts, the arcs from break statements exiting this loop.
- self.break_exits: Set[ArcStart] = set()
+ self.break_exits: set[ArcStart] = set()
- def process_break_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
+ def process_break_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool:
self.break_exits.update(exits)
return True
- def process_continue_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
+ def process_continue_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool:
for xit in exits:
add_arc(xit.lineno, self.start, xit.cause)
return True
@@ -548,7 +548,7 @@ def __init__(self, start: TLineNo, name: str) -> None:
# The name of the function.
self.name = name
- def process_raise_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
+ def process_raise_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool:
for xit in exits:
add_arc(
xit.lineno, -self.start, xit.cause,
@@ -556,7 +556,7 @@ def process_raise_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
)
return True
- def process_return_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
+ def process_return_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool:
for xit in exits:
add_arc(
xit.lineno, -self.start, xit.cause,
@@ -567,7 +567,7 @@ def process_return_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool
class TryBlock(Block):
"""A block on the block stack representing a `try` block."""
- def __init__(self, handler_start: Optional[TLineNo], final_start: Optional[TLineNo]) -> None:
+ def __init__(self, handler_start: TLineNo | None, final_start: TLineNo | None) -> None:
# The line number of the first "except" handler, if any.
self.handler_start = handler_start
# The line number of the "finally:" clause, if any.
@@ -575,24 +575,24 @@ def __init__(self, handler_start: Optional[TLineNo], final_start: Optional[TLine
# The ArcStarts for breaks/continues/returns/raises inside the "try:"
# that need to route through the "finally:" clause.
- self.break_from: Set[ArcStart] = set()
- self.continue_from: Set[ArcStart] = set()
- self.raise_from: Set[ArcStart] = set()
- self.return_from: Set[ArcStart] = set()
+ self.break_from: set[ArcStart] = set()
+ self.continue_from: set[ArcStart] = set()
+ self.raise_from: set[ArcStart] = set()
+ self.return_from: set[ArcStart] = set()
- def process_break_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
+ def process_break_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool:
if self.final_start is not None:
self.break_from.update(exits)
return True
return False
- def process_continue_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
+ def process_continue_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool:
if self.final_start is not None:
self.continue_from.update(exits)
return True
return False
- def process_raise_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
+ def process_raise_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool:
if self.handler_start is not None:
for xit in exits:
add_arc(xit.lineno, self.handler_start, xit.cause)
@@ -601,7 +601,7 @@ def process_raise_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
self.raise_from.update(exits)
return True
- def process_return_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
+ def process_return_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool:
if self.final_start is not None:
self.return_from.update(exits)
return True
@@ -620,15 +620,15 @@ def __init__(self, start: TLineNo) -> None:
# The ArcStarts for breaks/continues/returns/raises inside the "with:"
# that need to go through the with-statement while exiting.
- self.break_from: Set[ArcStart] = set()
- self.continue_from: Set[ArcStart] = set()
- self.return_from: Set[ArcStart] = set()
+ self.break_from: set[ArcStart] = set()
+ self.continue_from: set[ArcStart] = set()
+ self.return_from: set[ArcStart] = set()
def _process_exits(
self,
- exits: Set[ArcStart],
+ exits: set[ArcStart],
add_arc: TAddArcFn,
- from_set: Optional[Set[ArcStart]] = None,
+ from_set: set[ArcStart] | None = None,
) -> bool:
"""Helper to process the four kinds of exits."""
for xit in exits:
@@ -637,16 +637,16 @@ def _process_exits(
from_set.update(exits)
return True
- def process_break_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
+ def process_break_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool:
return self._process_exits(exits, add_arc, self.break_from)
- def process_continue_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
+ def process_continue_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool:
return self._process_exits(exits, add_arc, self.continue_from)
- def process_raise_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
+ def process_raise_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool:
return self._process_exits(exits, add_arc)
- def process_return_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
+ def process_return_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool:
return self._process_exits(exits, add_arc, self.return_from)
@@ -680,8 +680,8 @@ class AstArcAnalyzer:
def __init__(
self,
text: str,
- statements: Set[TLineNo],
- multiline: Dict[TLineNo, TLineNo],
+ statements: set[TLineNo],
+ multiline: dict[TLineNo, TLineNo],
) -> None:
self.root_node = ast.parse(text)
# TODO: I think this is happening in too many places.
@@ -696,12 +696,12 @@ def __init__(
# Dump the AST so that failing tests have helpful output.
print(f"Statements: {self.statements}")
print(f"Multiline map: {self.multiline}")
- dumpkw: Dict[str, Any] = {}
+ dumpkw: dict[str, Any] = {}
if sys.version_info >= (3, 9):
dumpkw["indent"] = 4
print(ast.dump(self.root_node, include_attributes=True, **dumpkw))
- self.arcs: Set[TArc] = set()
+ self.arcs: set[TArc] = set()
# A map from arc pairs to a list of pairs of sentence fragments:
# { (start, end): [(startmsg, endmsg), ...], }
@@ -709,7 +709,7 @@ def __init__(
# For an arc from line 17, they should be usable like:
# "Line 17 {endmsg}, because {startmsg}"
self.missing_arc_fragments: TArcFragments = collections.defaultdict(list)
- self.block_stack: List[Block] = []
+ self.block_stack: list[Block] = []
# $set_env.py: COVERAGE_TRACK_ARCS - Trace possible arcs added while parsing code.
self.debug = bool(int(os.getenv("COVERAGE_TRACK_ARCS", "0")))
@@ -731,8 +731,8 @@ def add_arc(
self,
start: TLineNo,
end: TLineNo,
- smsg: Optional[str] = None,
- emsg: Optional[str] = None,
+ smsg: str | None = None,
+ emsg: str | None = None,
) -> None:
"""Add an arc, including message fragments to use if it is missing."""
if self.debug: # pragma: debugging
@@ -811,7 +811,7 @@ def _line__Module(self, node: ast.Module) -> TLineNo:
"Import", "ImportFrom", "Nonlocal", "Pass",
}
- def add_arcs(self, node: ast.AST) -> Set[ArcStart]:
+ def add_arcs(self, node: ast.AST) -> set[ArcStart]:
"""Add the arcs for `node`.
Return a set of ArcStarts, exits from this node to the next. Because a
@@ -847,9 +847,9 @@ def add_arcs(self, node: ast.AST) -> Set[ArcStart]:
def add_body_arcs(
self,
body: Sequence[ast.AST],
- from_start: Optional[ArcStart] = None,
- prev_starts: Optional[Set[ArcStart]] = None,
- ) -> Set[ArcStart]:
+ from_start: ArcStart | None = None,
+ prev_starts: set[ArcStart] | None = None,
+ ) -> set[ArcStart]:
"""Add arcs for the body of a compound statement.
`body` is the body node. `from_start` is a single `ArcStart` that can
@@ -877,7 +877,7 @@ def add_body_arcs(
prev_starts = self.add_arcs(body_node)
return prev_starts
- def find_non_missing_node(self, node: ast.AST) -> Optional[ast.AST]:
+ def find_non_missing_node(self, node: ast.AST) -> ast.AST | None:
"""Search `node` looking for a child that has not been optimized away.
This might return the node you started with, or it will work recursively
@@ -911,7 +911,7 @@ def find_non_missing_node(self, node: ast.AST) -> Optional[ast.AST]:
# find_non_missing_node) to find a node to use instead of the missing
# node. They can return None if the node should truly be gone.
- def _missing__If(self, node: ast.If) -> Optional[ast.AST]:
+ def _missing__If(self, node: ast.If) -> ast.AST | None:
# If the if-node is missing, then one of its children might still be
# here, but not both. So return the first of the two that isn't missing.
# Use a NodeList to hold the clauses as a single node.
@@ -922,7 +922,7 @@ def _missing__If(self, node: ast.If) -> Optional[ast.AST]:
return self.find_non_missing_node(NodeList(node.orelse))
return None
- def _missing__NodeList(self, node: NodeList) -> Optional[ast.AST]:
+ def _missing__NodeList(self, node: NodeList) -> ast.AST | None:
# A NodeList might be a mixture of missing and present nodes. Find the
# ones that are present.
non_missing_children = []
@@ -938,7 +938,7 @@ def _missing__NodeList(self, node: NodeList) -> Optional[ast.AST]:
return non_missing_children[0]
return NodeList(non_missing_children)
- def _missing__While(self, node: ast.While) -> Optional[ast.AST]:
+ def _missing__While(self, node: ast.While) -> ast.AST | None:
body_nodes = self.find_non_missing_node(NodeList(node.body))
if not body_nodes:
return None
@@ -953,7 +953,7 @@ def _missing__While(self, node: ast.While) -> Optional[ast.AST]:
new_while.orelse = []
return new_while
- def is_constant_expr(self, node: ast.AST) -> Optional[str]:
+ def is_constant_expr(self, node: ast.AST) -> str | None:
"""Is this a compile-time constant?"""
node_name = node.__class__.__name__
if node_name in ["Constant", "NameConstant", "Num"]:
@@ -978,25 +978,25 @@ def is_constant_expr(self, node: ast.AST) -> Optional[str]:
# enclosing loop block, or the nearest enclosing finally block, whichever
# is nearer.
- def process_break_exits(self, exits: Set[ArcStart]) -> None:
+ def process_break_exits(self, exits: set[ArcStart]) -> None:
"""Add arcs due to jumps from `exits` being breaks."""
for block in self.nearest_blocks(): # pragma: always breaks
if block.process_break_exits(exits, self.add_arc):
break
- def process_continue_exits(self, exits: Set[ArcStart]) -> None:
+ def process_continue_exits(self, exits: set[ArcStart]) -> None:
"""Add arcs due to jumps from `exits` being continues."""
for block in self.nearest_blocks(): # pragma: always breaks
if block.process_continue_exits(exits, self.add_arc):
break
- def process_raise_exits(self, exits: Set[ArcStart]) -> None:
+ def process_raise_exits(self, exits: set[ArcStart]) -> None:
"""Add arcs due to jumps from `exits` being raises."""
for block in self.nearest_blocks():
if block.process_raise_exits(exits, self.add_arc):
break
- def process_return_exits(self, exits: Set[ArcStart]) -> None:
+ def process_return_exits(self, exits: set[ArcStart]) -> None:
"""Add arcs due to jumps from `exits` being returns."""
for block in self.nearest_blocks(): # pragma: always breaks
if block.process_return_exits(exits, self.add_arc):
@@ -1013,16 +1013,16 @@ def process_return_exits(self, exits: Set[ArcStart]) -> None:
# Every node type that represents a statement should have a handler, or it
# should be listed in OK_TO_DEFAULT.
- def _handle__Break(self, node: ast.Break) -> Set[ArcStart]:
+ def _handle__Break(self, node: ast.Break) -> set[ArcStart]:
here = self.line_for_node(node)
break_start = ArcStart(here, cause="the break on line {lineno} wasn't executed")
self.process_break_exits({break_start})
return set()
- def _handle_decorated(self, node: ast.FunctionDef) -> Set[ArcStart]:
+ def _handle_decorated(self, node: ast.FunctionDef) -> set[ArcStart]:
"""Add arcs for things that can be decorated (classes and functions)."""
main_line: TLineNo = node.lineno
- last: Optional[TLineNo] = node.lineno
+ last: TLineNo | None = node.lineno
decs = node.decorator_list
if decs:
last = None
@@ -1052,13 +1052,13 @@ def _handle_decorated(self, node: ast.FunctionDef) -> Set[ArcStart]:
_handle__ClassDef = _handle_decorated
- def _handle__Continue(self, node: ast.Continue) -> Set[ArcStart]:
+ def _handle__Continue(self, node: ast.Continue) -> set[ArcStart]:
here = self.line_for_node(node)
continue_start = ArcStart(here, cause="the continue on line {lineno} wasn't executed")
self.process_continue_exits({continue_start})
return set()
- def _handle__For(self, node: ast.For) -> Set[ArcStart]:
+ def _handle__For(self, node: ast.For) -> set[ArcStart]:
start = self.line_for_node(node.iter)
self.block_stack.append(LoopBlock(start=start))
from_start = ArcStart(start, cause="the loop on line {lineno} never started")
@@ -1083,7 +1083,7 @@ def _handle__For(self, node: ast.For) -> Set[ArcStart]:
_handle__FunctionDef = _handle_decorated
_handle__AsyncFunctionDef = _handle_decorated
- def _handle__If(self, node: ast.If) -> Set[ArcStart]:
+ def _handle__If(self, node: ast.If) -> set[ArcStart]:
start = self.line_for_node(node.test)
from_start = ArcStart(start, cause="the condition on line {lineno} was never true")
exits = self.add_body_arcs(node.body, from_start=from_start)
@@ -1092,7 +1092,7 @@ def _handle__If(self, node: ast.If) -> Set[ArcStart]:
return exits
if sys.version_info >= (3, 10):
- def _handle__Match(self, node: ast.Match) -> Set[ArcStart]:
+ def _handle__Match(self, node: ast.Match) -> set[ArcStart]:
start = self.line_for_node(node)
last_start = start
exits = set()
@@ -1115,26 +1115,26 @@ def _handle__Match(self, node: ast.Match) -> Set[ArcStart]:
exits.add(from_start)
return exits
- def _handle__NodeList(self, node: NodeList) -> Set[ArcStart]:
+ def _handle__NodeList(self, node: NodeList) -> set[ArcStart]:
start = self.line_for_node(node)
exits = self.add_body_arcs(node.body, from_start=ArcStart(start))
return exits
- def _handle__Raise(self, node: ast.Raise) -> Set[ArcStart]:
+ def _handle__Raise(self, node: ast.Raise) -> set[ArcStart]:
here = self.line_for_node(node)
raise_start = ArcStart(here, cause="the raise on line {lineno} wasn't executed")
self.process_raise_exits({raise_start})
# `raise` statement jumps away, no exits from here.
return set()
- def _handle__Return(self, node: ast.Return) -> Set[ArcStart]:
+ def _handle__Return(self, node: ast.Return) -> set[ArcStart]:
here = self.line_for_node(node)
return_start = ArcStart(here, cause="the return on line {lineno} wasn't executed")
self.process_return_exits({return_start})
# `return` statement jumps away, no exits from here.
return set()
- def _handle__Try(self, node: ast.Try) -> Set[ArcStart]:
+ def _handle__Try(self, node: ast.Try) -> set[ArcStart]:
if node.handlers:
handler_start = self.line_for_node(node.handlers[0])
else:
@@ -1167,10 +1167,10 @@ def _handle__Try(self, node: ast.Try) -> Set[ArcStart]:
else:
self.block_stack.pop()
- handler_exits: Set[ArcStart] = set()
+ handler_exits: set[ArcStart] = set()
if node.handlers:
- last_handler_start: Optional[TLineNo] = None
+ last_handler_start: TLineNo | None = None
for handler_node in node.handlers:
handler_start = self.line_for_node(handler_node)
if last_handler_start is not None:
@@ -1245,7 +1245,7 @@ def _handle__Try(self, node: ast.Try) -> Set[ArcStart]:
return exits
- def _combine_finally_starts(self, starts: Set[ArcStart], exits: Set[ArcStart]) -> Set[ArcStart]:
+ def _combine_finally_starts(self, starts: set[ArcStart], exits: set[ArcStart]) -> set[ArcStart]:
"""Helper for building the cause of `finally` branches.
"finally" clauses might not execute their exits, and the causes could
@@ -1260,7 +1260,7 @@ def _combine_finally_starts(self, starts: Set[ArcStart], exits: Set[ArcStart]) -
exits = {ArcStart(xit.lineno, cause) for xit in exits}
return exits
- def _handle__While(self, node: ast.While) -> Set[ArcStart]:
+ def _handle__While(self, node: ast.While) -> set[ArcStart]:
start = to_top = self.line_for_node(node.test)
constant_test = self.is_constant_expr(node.test)
top_is_body0 = False
@@ -1289,7 +1289,7 @@ def _handle__While(self, node: ast.While) -> Set[ArcStart]:
exits.add(from_start)
return exits
- def _handle__With(self, node: ast.With) -> Set[ArcStart]:
+ def _handle__With(self, node: ast.With) -> set[ArcStart]:
start = self.line_for_node(node)
if env.PYBEHAVIOR.exit_through_with:
self.block_stack.append(WithBlock(start=start))
diff --git a/coverage/phystokens.py b/coverage/phystokens.py
index 2ee6dd74f..7d8b30c8a 100644
--- a/coverage/phystokens.py
+++ b/coverage/phystokens.py
@@ -13,7 +13,7 @@
import token
import tokenize
-from typing import Iterable, List, Optional, Set, Tuple
+from typing import Iterable
from coverage import env
from coverage.types import TLineNo, TSourceTokenLines
@@ -32,7 +32,7 @@ def _phys_tokens(toks: TokenInfos) -> TokenInfos:
Returns the same values as generate_tokens()
"""
- last_line: Optional[str] = None
+ last_line: str | None = None
last_lineno = -1
last_ttext: str = ""
for ttype, ttext, (slineno, scol), (elineno, ecol), ltext in toks:
@@ -81,7 +81,7 @@ class SoftKeywordFinder(ast.NodeVisitor):
"""Helper for finding lines with soft keywords, like match/case lines."""
def __init__(self, source: str) -> None:
# This will be the set of line numbers that start with a soft keyword.
- self.soft_key_lines: Set[TLineNo] = set()
+ self.soft_key_lines: set[TLineNo] = set()
self.visit(ast.parse(source))
if sys.version_info >= (3, 10):
@@ -116,7 +116,7 @@ def source_token_lines(source: str) -> TSourceTokenLines:
"""
ws_tokens = {token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL}
- line: List[Tuple[str, str]] = []
+ line: list[tuple[str, str]] = []
col = 0
source = source.expandtabs(8).replace("\r\n", "\n")
@@ -182,8 +182,8 @@ class CachedTokenizer:
"""
def __init__(self) -> None:
- self.last_text: Optional[str] = None
- self.last_tokens: List[tokenize.TokenInfo] = []
+ self.last_text: str | None = None
+ self.last_tokens: list[tokenize.TokenInfo] = []
def generate_tokens(self, text: str) -> TokenInfos:
"""A stand-in for `tokenize.generate_tokens`."""
diff --git a/coverage/plugin.py b/coverage/plugin.py
index 67dcfbff1..761406688 100644
--- a/coverage/plugin.py
+++ b/coverage/plugin.py
@@ -117,7 +117,7 @@ def coverage_init(reg, options):
import functools
from types import FrameType
-from typing import Any, Dict, Iterable, Optional, Set, Tuple, Union
+from typing import Any, Iterable
from coverage import files
from coverage.misc import _needs_to_implement
@@ -130,7 +130,7 @@ class CoveragePlugin:
_coverage_plugin_name: str
_coverage_enabled: bool
- def file_tracer(self, filename: str) -> Optional[FileTracer]: # pylint: disable=unused-argument
+ def file_tracer(self, filename: str) -> FileTracer | None: # pylint: disable=unused-argument
"""Get a :class:`FileTracer` object for a file.
Plug-in type: file tracer.
@@ -173,7 +173,7 @@ def file_tracer(self, filename: str) -> Optional[FileTracer]: # pylint: disable=
def file_reporter(
self,
filename: str, # pylint: disable=unused-argument
- ) -> Union[FileReporter, str]: # str should be Literal["python"]
+ ) -> FileReporter | str: # str should be Literal["python"]
"""Get the :class:`FileReporter` class to use for a file.
Plug-in type: file tracer.
@@ -190,7 +190,7 @@ def file_reporter(
def dynamic_context(
self,
frame: FrameType, # pylint: disable=unused-argument
- ) -> Optional[str]:
+ ) -> str | None:
"""Get the dynamically computed context label for `frame`.
Plug-in type: dynamic context.
@@ -238,7 +238,7 @@ def configure(self, config: TConfigurable) -> None:
"""
pass
- def sys_info(self) -> Iterable[Tuple[str, Any]]:
+ def sys_info(self) -> Iterable[tuple[str, Any]]:
"""Get a list of information useful for debugging.
Plug-in type: any.
@@ -311,7 +311,7 @@ def dynamic_source_filename(
self,
filename: str, # pylint: disable=unused-argument
frame: FrameType, # pylint: disable=unused-argument
- ) -> Optional[str]:
+ ) -> str | None:
"""Get a dynamically computed source file name.
Some plug-ins need to compute the source file name dynamically for each
@@ -326,7 +326,7 @@ def dynamic_source_filename(
"""
return None
- def line_number_range(self, frame: FrameType) -> Tuple[TLineNo, TLineNo]:
+ def line_number_range(self, frame: FrameType) -> tuple[TLineNo, TLineNo]:
"""Get the range of source line numbers for a given a call frame.
The call frame is examined, and the source line number in the original
@@ -369,7 +369,7 @@ def __init__(self, filename: str) -> None:
self.filename = filename
def __repr__(self) -> str:
- return "<{0.__class__.__name__} filename={0.filename!r}>".format(self)
+ return f"<{self.__class__.__name__} filename={self.filename!r}>"
def relative_filename(self) -> str:
"""Get the relative file name for this file.
@@ -395,7 +395,7 @@ def source(self) -> str:
with open(self.filename, encoding="utf-8") as f:
return f.read()
- def lines(self) -> Set[TLineNo]:
+ def lines(self) -> set[TLineNo]:
"""Get the executable lines in this file.
Your plug-in must determine which lines in the file were possibly
@@ -406,7 +406,7 @@ def lines(self) -> Set[TLineNo]:
"""
_needs_to_implement(self, "lines")
- def excluded_lines(self) -> Set[TLineNo]:
+ def excluded_lines(self) -> set[TLineNo]:
"""Get the excluded executable lines in this file.
Your plug-in can use any method it likes to allow the user to exclude
@@ -419,7 +419,7 @@ def excluded_lines(self) -> Set[TLineNo]:
"""
return set()
- def translate_lines(self, lines: Iterable[TLineNo]) -> Set[TLineNo]:
+ def translate_lines(self, lines: Iterable[TLineNo]) -> set[TLineNo]:
"""Translate recorded lines into reported lines.
Some file formats will want to report lines slightly differently than
@@ -439,7 +439,7 @@ def translate_lines(self, lines: Iterable[TLineNo]) -> Set[TLineNo]:
"""
return set(lines)
- def arcs(self) -> Set[TArc]:
+ def arcs(self) -> set[TArc]:
"""Get the executable arcs in this file.
To support branch coverage, your plug-in needs to be able to indicate
@@ -453,7 +453,7 @@ def arcs(self) -> Set[TArc]:
"""
return set()
- def no_branch_lines(self) -> Set[TLineNo]:
+ def no_branch_lines(self) -> set[TLineNo]:
"""Get the lines excused from branch coverage in this file.
Your plug-in can use any method it likes to allow the user to exclude
@@ -466,7 +466,7 @@ def no_branch_lines(self) -> Set[TLineNo]:
"""
return set()
- def translate_arcs(self, arcs: Iterable[TArc]) -> Set[TArc]:
+ def translate_arcs(self, arcs: Iterable[TArc]) -> set[TArc]:
"""Translate recorded arcs into reported arcs.
Similar to :meth:`translate_lines`, but for arcs. `arcs` is a set of
@@ -479,7 +479,7 @@ def translate_arcs(self, arcs: Iterable[TArc]) -> Set[TArc]:
"""
return set(arcs)
- def exit_counts(self) -> Dict[TLineNo, int]:
+ def exit_counts(self) -> dict[TLineNo, int]:
"""Get a count of exits from that each line.
To determine which lines are branches, coverage.py looks for lines that
@@ -496,7 +496,7 @@ def missing_arc_description(
self,
start: TLineNo,
end: TLineNo,
- executed_arcs: Optional[Iterable[TArc]] = None, # pylint: disable=unused-argument
+ executed_arcs: Iterable[TArc] | None = None, # pylint: disable=unused-argument
) -> str:
"""Provide an English sentence describing a missing arc.
diff --git a/coverage/plugin_support.py b/coverage/plugin_support.py
index d1f2250bc..7b843a10b 100644
--- a/coverage/plugin_support.py
+++ b/coverage/plugin_support.py
@@ -10,7 +10,7 @@
import sys
from types import FrameType
-from typing import Any, Dict, Iterable, Iterator, List, Optional, Set, Tuple, Union
+from typing import Any, Iterable, Iterator
from coverage.exceptions import PluginError
from coverage.misc import isolate_module
@@ -26,21 +26,21 @@ class Plugins:
"""The currently loaded collection of coverage.py plugins."""
def __init__(self) -> None:
- self.order: List[CoveragePlugin] = []
- self.names: Dict[str, CoveragePlugin] = {}
- self.file_tracers: List[CoveragePlugin] = []
- self.configurers: List[CoveragePlugin] = []
- self.context_switchers: List[CoveragePlugin] = []
+ self.order: list[CoveragePlugin] = []
+ self.names: dict[str, CoveragePlugin] = {}
+ self.file_tracers: list[CoveragePlugin] = []
+ self.configurers: list[CoveragePlugin] = []
+ self.context_switchers: list[CoveragePlugin] = []
- self.current_module: Optional[str] = None
- self.debug: Optional[TDebugCtl]
+ self.current_module: str | None = None
+ self.debug: TDebugCtl | None
@classmethod
def load_plugins(
cls,
modules: Iterable[str],
config: TPluginConfig,
- debug: Optional[TDebugCtl] = None,
+ debug: TDebugCtl | None = None,
) -> Plugins:
"""Load plugins from `modules`.
@@ -105,7 +105,7 @@ def add_noop(self, plugin: CoveragePlugin) -> None:
def _add_plugin(
self,
plugin: CoveragePlugin,
- specialized: Optional[List[CoveragePlugin]],
+ specialized: list[CoveragePlugin] | None,
) -> None:
"""Add a plugin object.
@@ -166,7 +166,7 @@ def __init__(self, plugin: CoveragePlugin, debug: LabelledDebug) -> None:
self.plugin = plugin
self.debug = debug
- def file_tracer(self, filename: str) -> Optional[FileTracer]:
+ def file_tracer(self, filename: str) -> FileTracer | None:
tracer = self.plugin.file_tracer(filename)
self.debug.write(f"file_tracer({filename!r}) --> {tracer!r}")
if tracer:
@@ -174,7 +174,7 @@ def file_tracer(self, filename: str) -> Optional[FileTracer]:
tracer = DebugFileTracerWrapper(tracer, debug)
return tracer
- def file_reporter(self, filename: str) -> Union[FileReporter, str]:
+ def file_reporter(self, filename: str) -> FileReporter | str:
reporter = self.plugin.file_reporter(filename)
assert isinstance(reporter, FileReporter)
self.debug.write(f"file_reporter({filename!r}) --> {reporter!r}")
@@ -183,7 +183,7 @@ def file_reporter(self, filename: str) -> Union[FileReporter, str]:
reporter = DebugFileReporterWrapper(filename, reporter, debug)
return reporter
- def dynamic_context(self, frame: FrameType) -> Optional[str]:
+ def dynamic_context(self, frame: FrameType) -> str | None:
context = self.plugin.dynamic_context(frame)
self.debug.write(f"dynamic_context({frame!r}) --> {context!r}")
return context
@@ -197,7 +197,7 @@ def configure(self, config: TConfigurable) -> None:
self.debug.write(f"configure({config!r})")
self.plugin.configure(config)
- def sys_info(self) -> Iterable[Tuple[str, Any]]:
+ def sys_info(self) -> Iterable[tuple[str, Any]]:
return self.plugin.sys_info()
@@ -225,14 +225,14 @@ def has_dynamic_source_filename(self) -> bool:
self.debug.write(f"has_dynamic_source_filename() --> {has!r}")
return has
- def dynamic_source_filename(self, filename: str, frame: FrameType) -> Optional[str]:
+ def dynamic_source_filename(self, filename: str, frame: FrameType) -> str | None:
dyn = self.tracer.dynamic_source_filename(filename, frame)
self.debug.write("dynamic_source_filename({!r}, {}) --> {!r}".format(
filename, self._show_frame(frame), dyn,
))
return dyn
- def line_number_range(self, frame: FrameType) -> Tuple[TLineNo, TLineNo]:
+ def line_number_range(self, frame: FrameType) -> tuple[TLineNo, TLineNo]:
pair = self.tracer.line_number_range(frame)
self.debug.write(f"line_number_range({self._show_frame(frame)}) --> {pair!r}")
return pair
@@ -251,37 +251,37 @@ def relative_filename(self) -> str:
self.debug.write(f"relative_filename() --> {ret!r}")
return ret
- def lines(self) -> Set[TLineNo]:
+ def lines(self) -> set[TLineNo]:
ret = self.reporter.lines()
self.debug.write(f"lines() --> {ret!r}")
return ret
- def excluded_lines(self) -> Set[TLineNo]:
+ def excluded_lines(self) -> set[TLineNo]:
ret = self.reporter.excluded_lines()
self.debug.write(f"excluded_lines() --> {ret!r}")
return ret
- def translate_lines(self, lines: Iterable[TLineNo]) -> Set[TLineNo]:
+ def translate_lines(self, lines: Iterable[TLineNo]) -> set[TLineNo]:
ret = self.reporter.translate_lines(lines)
self.debug.write(f"translate_lines({lines!r}) --> {ret!r}")
return ret
- def translate_arcs(self, arcs: Iterable[TArc]) -> Set[TArc]:
+ def translate_arcs(self, arcs: Iterable[TArc]) -> set[TArc]:
ret = self.reporter.translate_arcs(arcs)
self.debug.write(f"translate_arcs({arcs!r}) --> {ret!r}")
return ret
- def no_branch_lines(self) -> Set[TLineNo]:
+ def no_branch_lines(self) -> set[TLineNo]:
ret = self.reporter.no_branch_lines()
self.debug.write(f"no_branch_lines() --> {ret!r}")
return ret
- def exit_counts(self) -> Dict[TLineNo, int]:
+ def exit_counts(self) -> dict[TLineNo, int]:
ret = self.reporter.exit_counts()
self.debug.write(f"exit_counts() --> {ret!r}")
return ret
- def arcs(self) -> Set[TArc]:
+ def arcs(self) -> set[TArc]:
ret = self.reporter.arcs()
self.debug.write(f"arcs() --> {ret!r}")
return ret
diff --git a/coverage/python.py b/coverage/python.py
index 3deb6819f..0a522d6b9 100644
--- a/coverage/python.py
+++ b/coverage/python.py
@@ -9,7 +9,7 @@
import types
import zipimport
-from typing import Dict, Iterable, Optional, Set, TYPE_CHECKING
+from typing import Iterable, TYPE_CHECKING
from coverage import env
from coverage.exceptions import CoverageException, NoSource
@@ -46,7 +46,7 @@ def get_python_source(filename: str) -> str:
else:
exts = [ext]
- source_bytes: Optional[bytes]
+ source_bytes: bytes | None
for ext in exts:
try_filename = base + ext
if os.path.exists(try_filename):
@@ -73,7 +73,7 @@ def get_python_source(filename: str) -> str:
return source
-def get_zip_bytes(filename: str) -> Optional[bytes]:
+def get_zip_bytes(filename: str) -> bytes | None:
"""Get data from `filename` if it is a zip file path.
Returns the bytestring data read from the zip file, or None if no zip file
@@ -143,7 +143,7 @@ def source_for_morf(morf: TMorf) -> str:
class PythonFileReporter(FileReporter):
"""Report support for a Python file."""
- def __init__(self, morf: TMorf, coverage: Optional[Coverage] = None) -> None:
+ def __init__(self, morf: TMorf, coverage: Coverage | None = None) -> None:
self.coverage = coverage
filename = source_for_morf(morf)
@@ -166,8 +166,8 @@ def __init__(self, morf: TMorf, coverage: Optional[Coverage] = None) -> None:
name = relative_filename(filename)
self.relname = name
- self._source: Optional[str] = None
- self._parser: Optional[PythonParser] = None
+ self._source: str | None = None
+ self._parser: PythonParser | None = None
self._excluded = None
def __repr__(self) -> str:
@@ -188,22 +188,22 @@ def parser(self) -> PythonParser:
self._parser.parse_source()
return self._parser
- def lines(self) -> Set[TLineNo]:
+ def lines(self) -> set[TLineNo]:
"""Return the line numbers of statements in the file."""
return self.parser.statements
- def excluded_lines(self) -> Set[TLineNo]:
+ def excluded_lines(self) -> set[TLineNo]:
"""Return the line numbers of statements in the file."""
return self.parser.excluded
- def translate_lines(self, lines: Iterable[TLineNo]) -> Set[TLineNo]:
+ def translate_lines(self, lines: Iterable[TLineNo]) -> set[TLineNo]:
return self.parser.translate_lines(lines)
- def translate_arcs(self, arcs: Iterable[TArc]) -> Set[TArc]:
+ def translate_arcs(self, arcs: Iterable[TArc]) -> set[TArc]:
return self.parser.translate_arcs(arcs)
@expensive
- def no_branch_lines(self) -> Set[TLineNo]:
+ def no_branch_lines(self) -> set[TLineNo]:
assert self.coverage is not None
no_branch = self.parser.lines_matching(
join_regex(self.coverage.config.partial_list),
@@ -212,18 +212,18 @@ def no_branch_lines(self) -> Set[TLineNo]:
return no_branch
@expensive
- def arcs(self) -> Set[TArc]:
+ def arcs(self) -> set[TArc]:
return self.parser.arcs()
@expensive
- def exit_counts(self) -> Dict[TLineNo, int]:
+ def exit_counts(self) -> dict[TLineNo, int]:
return self.parser.exit_counts()
def missing_arc_description(
self,
start: TLineNo,
end: TLineNo,
- executed_arcs: Optional[Iterable[TArc]] = None,
+ executed_arcs: Iterable[TArc] | None = None,
) -> str:
return self.parser.missing_arc_description(start, end, executed_arcs)
diff --git a/coverage/pytracer.py b/coverage/pytracer.py
index 90ad3eb21..69d52c948 100644
--- a/coverage/pytracer.py
+++ b/coverage/pytracer.py
@@ -12,7 +12,7 @@
import threading
from types import FrameType, ModuleType
-from typing import Any, Callable, Dict, List, Optional, Set, Tuple, cast
+from typing import Any, Callable, Set, cast
from coverage import env
from coverage.types import (
@@ -63,18 +63,18 @@ def __init__(self) -> None:
self.data: TTraceData
self.trace_arcs = False
self.should_trace: Callable[[str, FrameType], TFileDisposition]
- self.should_trace_cache: Dict[str, Optional[TFileDisposition]]
- self.should_start_context: Optional[Callable[[FrameType], Optional[str]]] = None
- self.switch_context: Optional[Callable[[Optional[str]], None]] = None
+ self.should_trace_cache: dict[str, TFileDisposition | None]
+ self.should_start_context: Callable[[FrameType], str | None] | None = None
+ self.switch_context: Callable[[str | None], None] | None = None
self.warn: TWarnFn
# The threading module to use, if any.
- self.threading: Optional[ModuleType] = None
+ self.threading: ModuleType | None = None
- self.cur_file_data: Optional[TTraceFileData] = None
+ self.cur_file_data: TTraceFileData | None = None
self.last_line: TLineNo = 0
- self.cur_file_name: Optional[str] = None
- self.context: Optional[str] = None
+ self.cur_file_name: str | None = None
+ self.context: str | None = None
self.started_context = False
# The data_stack parallels the Python call stack. Each entry is
@@ -85,8 +85,8 @@ def __init__(self) -> None:
# this frame.
# [2] The last line number executed in this frame.
# [3] Boolean: did this frame start a new context?
- self.data_stack: List[Tuple[Optional[TTraceFileData], Optional[str], TLineNo, bool]] = []
- self.thread: Optional[threading.Thread] = None
+ self.data_stack: list[tuple[TTraceFileData | None, str | None, TLineNo, bool]] = []
+ self.thread: threading.Thread | None = None
self.stopped = False
self._activity = False
@@ -106,11 +106,7 @@ def __repr__(self) -> str:
def log(self, marker: str, *args: Any) -> None:
"""For hard-core logging of what this tracer is doing."""
with open("/tmp/debug_trace.txt", "a") as f:
- f.write("{} {}[{}]".format(
- marker,
- self.id,
- len(self.data_stack),
- ))
+ f.write(f"{marker} {self.id}[{len(self.data_stack)}]")
if 0: # if you want thread ids..
f.write(".{:x}.{:x}".format( # type: ignore[unreachable]
self.thread.ident,
@@ -131,8 +127,8 @@ def _trace(
frame: FrameType,
event: str,
arg: Any, # pylint: disable=unused-argument
- lineno: Optional[TLineNo] = None, # pylint: disable=unused-argument
- ) -> Optional[TTraceFn]:
+ lineno: TLineNo | None = None, # pylint: disable=unused-argument
+ ) -> TTraceFn | None:
"""The trace function passed to sys.settrace."""
if THIS_FILE in frame.f_code.co_filename:
@@ -345,6 +341,6 @@ def reset_activity(self) -> None:
"""Reset the activity() flag."""
self._activity = False
- def get_stats(self) -> Optional[Dict[str, int]]:
+ def get_stats(self) -> dict[str, int] | None:
"""Return a dictionary of statistics, or None."""
return None
diff --git a/coverage/report.py b/coverage/report.py
index 4ad9a83fa..42f7b5aec 100644
--- a/coverage/report.py
+++ b/coverage/report.py
@@ -7,7 +7,7 @@
import sys
-from typing import Any, IO, Iterable, List, Optional, Tuple, TYPE_CHECKING
+from typing import Any, IO, Iterable, TYPE_CHECKING
from coverage.exceptions import ConfigError, NoDataError
from coverage.misc import human_sorted_items
@@ -27,11 +27,11 @@ def __init__(self, coverage: Coverage) -> None:
self.coverage = coverage
self.config = self.coverage.config
self.branches = coverage.get_data().has_arcs()
- self.outfile: Optional[IO[str]] = None
+ self.outfile: IO[str] | None = None
self.output_format = self.config.format or "text"
if self.output_format not in {"text", "markdown", "total"}:
raise ConfigError(f"Unknown report format choice: {self.output_format!r}")
- self.fr_analysis: List[Tuple[FileReporter, Analysis]] = []
+ self.fr_analysis: list[tuple[FileReporter, Analysis]] = []
self.skipped_count = 0
self.empty_count = 0
self.total = Numbers(precision=self.config.precision)
@@ -48,10 +48,10 @@ def write_items(self, items: Iterable[str]) -> None:
def _report_text(
self,
- header: List[str],
- lines_values: List[List[Any]],
- total_line: List[Any],
- end_lines: List[str],
+ header: list[str],
+ lines_values: list[list[Any]],
+ total_line: list[Any],
+ end_lines: list[str],
) -> None:
"""Internal method that prints report data in text format.
@@ -109,10 +109,10 @@ def _report_text(
def _report_markdown(
self,
- header: List[str],
- lines_values: List[List[Any]],
- total_line: List[Any],
- end_lines: List[str],
+ header: list[str],
+ lines_values: list[list[Any]],
+ total_line: list[Any],
+ end_lines: list[str],
) -> None:
"""Internal method that prints report data in markdown format.
@@ -156,7 +156,7 @@ def _report_markdown(
# Write the TOTAL line
formats.update(dict(Name="|{:>{name_len}} |", Cover="{:>{n}} |"))
- total_line_items: List[str] = []
+ total_line_items: list[str] = []
for item, value in zip(header, total_line):
if value == "":
insert = value
@@ -169,7 +169,7 @@ def _report_markdown(
for end_line in end_lines:
self.write(end_line)
- def report(self, morfs: Optional[Iterable[TMorf]], outfile: Optional[IO[str]] = None) -> float:
+ def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str] | None = None) -> float:
"""Writes a report summarizing coverage statistics per module.
`outfile` is a text-mode file object to write the summary to.
diff --git a/coverage/report_core.py b/coverage/report_core.py
index 1535bf8f7..f6bb1f373 100644
--- a/coverage/report_core.py
+++ b/coverage/report_core.py
@@ -8,7 +8,7 @@
import sys
from typing import (
- Callable, Iterable, Iterator, IO, Optional, Protocol, Tuple, TYPE_CHECKING,
+ Callable, Iterable, Iterator, IO, Protocol, TYPE_CHECKING,
)
from coverage.exceptions import NoDataError, NotPython
@@ -27,14 +27,14 @@ class Reporter(Protocol):
report_type: str
- def report(self, morfs: Optional[Iterable[TMorf]], outfile: IO[str]) -> float:
+ def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str]) -> float:
"""Generate a report of `morfs`, written to `outfile`."""
def render_report(
output_path: str,
reporter: Reporter,
- morfs: Optional[Iterable[TMorf]],
+ morfs: Iterable[TMorf] | None,
msgfn: Callable[[str], None],
) -> float:
"""Run a one-file report generator, managing the output file.
@@ -72,8 +72,8 @@ def render_report(
def get_analysis_to_report(
coverage: Coverage,
- morfs: Optional[Iterable[TMorf]],
-) -> Iterator[Tuple[FileReporter, Analysis]]:
+ morfs: Iterable[TMorf] | None,
+) -> Iterator[tuple[FileReporter, Analysis]]:
"""Get the files to report on.
For each morf in `morfs`, if it should be reported on (based on the omit
diff --git a/coverage/results.py b/coverage/results.py
index f5f9a3719..45cc4f198 100644
--- a/coverage/results.py
+++ b/coverage/results.py
@@ -7,7 +7,7 @@
import collections
-from typing import Callable, Dict, Iterable, List, Optional, Tuple, TYPE_CHECKING
+from typing import Callable, Iterable, TYPE_CHECKING
from coverage.debug import auto_repr
from coverage.exceptions import ConfigError
@@ -86,18 +86,18 @@ def has_arcs(self) -> bool:
"""Were arcs measured in this result?"""
return self.data.has_arcs()
- def arc_possibilities(self) -> List[TArc]:
+ def arc_possibilities(self) -> list[TArc]:
"""Returns a sorted list of the arcs in the code."""
return self._arc_possibilities
- def arcs_executed(self) -> List[TArc]:
+ def arcs_executed(self) -> list[TArc]:
"""Returns a sorted list of the arcs actually executed in the code."""
executed: Iterable[TArc]
executed = self.data.arcs(self.filename) or []
executed = self.file_reporter.translate_arcs(executed)
return sorted(executed)
- def arcs_missing(self) -> List[TArc]:
+ def arcs_missing(self) -> list[TArc]:
"""Returns a sorted list of the un-executed arcs in the code."""
possible = self.arc_possibilities()
executed = self.arcs_executed()
@@ -109,7 +109,7 @@ def arcs_missing(self) -> List[TArc]:
)
return sorted(missing)
- def arcs_unpredicted(self) -> List[TArc]:
+ def arcs_unpredicted(self) -> list[TArc]:
"""Returns a sorted list of the executed arcs missing from the code."""
possible = self.arc_possibilities()
executed = self.arcs_executed()
@@ -126,7 +126,7 @@ def arcs_unpredicted(self) -> List[TArc]:
)
return sorted(unpredicted)
- def _branch_lines(self) -> List[TLineNo]:
+ def _branch_lines(self) -> list[TLineNo]:
"""Returns a list of line numbers that have more than one exit."""
return [l1 for l1,count in self.exit_counts.items() if count > 1]
@@ -134,7 +134,7 @@ def _total_branches(self) -> int:
"""How many total branches are there?"""
return sum(count for count in self.exit_counts.values() if count > 1)
- def missing_branch_arcs(self) -> Dict[TLineNo, List[TLineNo]]:
+ def missing_branch_arcs(self) -> dict[TLineNo, list[TLineNo]]:
"""Return arcs that weren't executed from branch lines.
Returns {l1:[l2a,l2b,...], ...}
@@ -148,7 +148,7 @@ def missing_branch_arcs(self) -> Dict[TLineNo, List[TLineNo]]:
mba[l1].append(l2)
return mba
- def executed_branch_arcs(self) -> Dict[TLineNo, List[TLineNo]]:
+ def executed_branch_arcs(self) -> dict[TLineNo, list[TLineNo]]:
"""Return arcs that were executed from branch lines.
Returns {l1:[l2a,l2b,...], ...}
@@ -162,7 +162,7 @@ def executed_branch_arcs(self) -> Dict[TLineNo, List[TLineNo]]:
eba[l1].append(l2)
return eba
- def branch_stats(self) -> Dict[TLineNo, Tuple[int, int]]:
+ def branch_stats(self) -> dict[TLineNo, tuple[int, int]]:
"""Get stats about branches.
Returns a dict mapping line numbers to a tuple:
@@ -211,7 +211,7 @@ def __init__(
__repr__ = auto_repr
- def init_args(self) -> List[int]:
+ def init_args(self) -> list[int]:
"""Return a list for __init__(*args) to recreate this object."""
return [
self._precision,
@@ -274,7 +274,7 @@ def pc_str_width(self) -> int:
return width
@property
- def ratio_covered(self) -> Tuple[int, int]:
+ def ratio_covered(self) -> tuple[int, int]:
"""Return a numerator and denominator for the coverage ratio."""
numerator = self.n_executed + self.n_executed_branches
denominator = self.n_statements + self.n_branches
@@ -304,7 +304,7 @@ def __radd__(self, other: int) -> Numbers:
def _line_ranges(
statements: Iterable[TLineNo],
lines: Iterable[TLineNo],
-) -> List[Tuple[TLineNo, TLineNo]]:
+) -> list[tuple[TLineNo, TLineNo]]:
"""Produce a list of ranges for `format_lines`."""
statements = sorted(statements)
lines = sorted(lines)
@@ -331,7 +331,7 @@ def _line_ranges(
def format_lines(
statements: Iterable[TLineNo],
lines: Iterable[TLineNo],
- arcs: Optional[Iterable[Tuple[TLineNo, List[TLineNo]]]] = None,
+ arcs: Iterable[tuple[TLineNo, list[TLineNo]]] | None = None,
) -> str:
"""Nicely format a list of line numbers.
diff --git a/coverage/sqldata.py b/coverage/sqldata.py
index ca06453f3..f12ccd7a9 100644
--- a/coverage/sqldata.py
+++ b/coverage/sqldata.py
@@ -21,8 +21,8 @@
import zlib
from typing import (
- cast, Any, Collection, Dict, List, Mapping,
- Optional, Sequence, Set, Tuple, Union,
+ cast, Any, Collection, Mapping,
+ Sequence,
)
from coverage.debug import NoDebugging, auto_repr
@@ -212,11 +212,11 @@ class CoverageData:
def __init__(
self,
- basename: Optional[FilePath] = None,
- suffix: Optional[Union[str, bool]] = None,
+ basename: FilePath | None = None,
+ suffix: str | bool | None = None,
no_disk: bool = False,
- warn: Optional[TWarnFn] = None,
- debug: Optional[TDebugCtl] = None,
+ warn: TWarnFn | None = None,
+ debug: TDebugCtl | None = None,
) -> None:
"""Create a :class:`CoverageData` object to hold coverage-measured data.
@@ -240,9 +240,9 @@ def __init__(
self._choose_filename()
# Maps filenames to row ids.
- self._file_map: Dict[str, int] = {}
+ self._file_map: dict[str, int] = {}
# Maps thread ids to SqliteDb objects.
- self._dbs: Dict[int, SqliteDb] = {}
+ self._dbs: dict[int, SqliteDb] = {}
self._pid = os.getpid()
# Synchronize the operations used during collection.
self._lock = threading.RLock()
@@ -253,9 +253,9 @@ def __init__(
self._has_lines = False
self._has_arcs = False
- self._current_context: Optional[str] = None
- self._current_context_id: Optional[int] = None
- self._query_context_ids: Optional[List[int]] = None
+ self._current_context: str | None = None
+ self._current_context_id: int | None = None
+ self._query_context_ids: list[int] | None = None
__repr__ = auto_repr
@@ -405,7 +405,7 @@ def loads(self, data: bytes) -> None:
self._read_db()
self._have_used = True
- def _file_id(self, filename: str, add: bool = False) -> Optional[int]:
+ def _file_id(self, filename: str, add: bool = False) -> int | None:
"""Get the file id for `filename`.
If filename is not in the database yet, add it if `add` is True.
@@ -420,7 +420,7 @@ def _file_id(self, filename: str, add: bool = False) -> Optional[int]:
)
return self._file_map.get(filename)
- def _context_id(self, context: str) -> Optional[int]:
+ def _context_id(self, context: str) -> int | None:
"""Get the id for a context."""
assert context is not None
self._start_using()
@@ -432,7 +432,7 @@ def _context_id(self, context: str) -> Optional[int]:
return None
@_locked
- def set_context(self, context: Optional[str]) -> None:
+ def set_context(self, context: str | None) -> None:
"""Set the current context for future :meth:`add_lines` etc.
`context` is a str, the name of the context to use for the next data
@@ -604,7 +604,7 @@ def touch_file(self, filename: str, plugin_name: str = "") -> None:
"""
self.touch_files([filename], plugin_name)
- def touch_files(self, filenames: Collection[str], plugin_name: Optional[str] = None) -> None:
+ def touch_files(self, filenames: Collection[str], plugin_name: str | None = None) -> None:
"""Ensure that `filenames` appear in the data, empty if needed.
`plugin_name` is the name of the plugin responsible for these files.
@@ -647,7 +647,7 @@ def purge_files(self, filenames: Collection[str]) -> None:
continue
con.execute_void(sql, (file_id,))
- def update(self, other_data: CoverageData, aliases: Optional[PathAliases] = None) -> None:
+ def update(self, other_data: CoverageData, aliases: PathAliases | None = None) -> None:
"""Update this data with data from several other :class:`CoverageData` instances.
If `aliases` is provided, it's a `PathAliases` object that is used to
@@ -699,7 +699,7 @@ def update(self, other_data: CoverageData, aliases: Optional[PathAliases] = None
"inner join file on file.id = line_bits.file_id " +
"inner join context on context.id = line_bits.context_id",
) as cur:
- lines: Dict[Tuple[str, str], bytes] = {}
+ lines: dict[tuple[str, str], bytes] = {}
for path, context, numbits in cur:
key = (files[path], context)
if key in lines:
@@ -863,7 +863,7 @@ def has_arcs(self) -> bool:
"""Does the database have arcs (True) or lines (False)."""
return bool(self._has_arcs)
- def measured_files(self) -> Set[str]:
+ def measured_files(self) -> set[str]:
"""A set of all files that have been measured.
Note that a file may be mentioned as measured even though no lines or
@@ -872,7 +872,7 @@ def measured_files(self) -> Set[str]:
"""
return set(self._file_map)
- def measured_contexts(self) -> Set[str]:
+ def measured_contexts(self) -> set[str]:
"""A set of all contexts that have been measured.
.. versionadded:: 5.0
@@ -884,7 +884,7 @@ def measured_contexts(self) -> Set[str]:
contexts = {row[0] for row in cur}
return contexts
- def file_tracer(self, filename: str) -> Optional[str]:
+ def file_tracer(self, filename: str) -> str | None:
"""Get the plugin name of the file tracer for a file.
Returns the name of the plugin that handles this file. If the file was
@@ -918,7 +918,7 @@ def set_query_context(self, context: str) -> None:
with con.execute("select id from context where context = ?", (context,)) as cur:
self._query_context_ids = [row[0] for row in cur.fetchall()]
- def set_query_contexts(self, contexts: Optional[Sequence[str]]) -> None:
+ def set_query_contexts(self, contexts: Sequence[str] | None) -> None:
"""Set a number of contexts for subsequent querying.
The next :meth:`lines`, :meth:`arcs`, or :meth:`contexts_by_lineno`
@@ -939,7 +939,7 @@ def set_query_contexts(self, contexts: Optional[Sequence[str]]) -> None:
else:
self._query_context_ids = None
- def lines(self, filename: str) -> Optional[List[TLineNo]]:
+ def lines(self, filename: str) -> list[TLineNo] | None:
"""Get the list of lines executed for a source file.
If the file was not measured, returns None. A file might be measured,
@@ -974,7 +974,7 @@ def lines(self, filename: str) -> Optional[List[TLineNo]]:
nums.update(numbits_to_nums(row[0]))
return list(nums)
- def arcs(self, filename: str) -> Optional[List[TArc]]:
+ def arcs(self, filename: str) -> list[TArc] | None:
"""Get the list of arcs executed for a file.
If the file was not measured, returns None. A file might be measured,
@@ -1006,7 +1006,7 @@ def arcs(self, filename: str) -> Optional[List[TArc]]:
with con.execute(query, data) as cur:
return list(cur)
- def contexts_by_lineno(self, filename: str) -> Dict[TLineNo, List[str]]:
+ def contexts_by_lineno(self, filename: str) -> dict[TLineNo, list[str]]:
"""Get the contexts for each line in a file.
Returns:
@@ -1058,7 +1058,7 @@ def contexts_by_lineno(self, filename: str) -> Dict[TLineNo, List[str]]:
return {lineno: list(contexts) for lineno, contexts in lineno_contexts_map.items()}
@classmethod
- def sys_info(cls) -> List[Tuple[str, Any]]:
+ def sys_info(cls) -> list[tuple[str, Any]]:
"""Our information for `Coverage.sys_info`.
Returns a list of (key, value) pairs.
@@ -1078,7 +1078,7 @@ def sys_info(cls) -> List[Tuple[str, Any]]:
]
-def filename_suffix(suffix: Union[str, bool, None]) -> Union[str, None]:
+def filename_suffix(suffix: str | bool | None) -> str | None:
"""Compute a filename suffix for a data file.
If `suffix` is a string or None, simply return it. If `suffix` is True,
diff --git a/coverage/sqlitedb.py b/coverage/sqlitedb.py
index 521431d66..0a3e83755 100644
--- a/coverage/sqlitedb.py
+++ b/coverage/sqlitedb.py
@@ -9,7 +9,7 @@
import re
import sqlite3
-from typing import cast, Any, Iterable, Iterator, List, Optional, Tuple
+from typing import cast, Any, Iterable, Iterator, Tuple
from coverage.debug import auto_repr, clipped_repr, exc_one_line
from coverage.exceptions import DataError
@@ -32,7 +32,7 @@ def __init__(self, filename: str, debug: TDebugCtl) -> None:
self.debug = debug
self.filename = filename
self.nest = 0
- self.con: Optional[sqlite3.Connection] = None
+ self.con: sqlite3.Connection | None = None
__repr__ = auto_repr
@@ -174,7 +174,7 @@ def execute_for_rowid(self, sql: str, parameters: Iterable[Any] = ()) -> int:
self.debug.write(f"Row id result: {rowid!r}")
return rowid
- def execute_one(self, sql: str, parameters: Iterable[Any] = ()) -> Optional[Tuple[Any, ...]]:
+ def execute_one(self, sql: str, parameters: Iterable[Any] = ()) -> tuple[Any, ...] | None:
"""Execute a statement and return the one row that results.
This is like execute(sql, parameters).fetchone(), except it is
@@ -192,7 +192,7 @@ def execute_one(self, sql: str, parameters: Iterable[Any] = ()) -> Optional[Tupl
else:
raise AssertionError(f"SQL {sql!r} shouldn't return {len(rows)} rows")
- def _executemany(self, sql: str, data: List[Any]) -> sqlite3.Cursor:
+ def _executemany(self, sql: str, data: list[Any]) -> sqlite3.Cursor:
"""Same as :meth:`python:sqlite3.Connection.executemany`."""
if self.debug.should("sql"):
final = ":" if self.debug.should("sqldata") else ""
diff --git a/coverage/sysmon.py b/coverage/sysmon.py
index a2ee87a0b..5e1371a92 100644
--- a/coverage/sysmon.py
+++ b/coverage/sysmon.py
@@ -18,9 +18,6 @@
from typing import (
Any,
Callable,
- Dict,
- List,
- Optional,
Set,
TYPE_CHECKING,
cast,
@@ -75,7 +72,7 @@ def _wrapped(*args: Any, **kwargs: Any) -> Any:
short_stack = functools.partial(
short_stack, full=True, short_filenames=True, frame_ids=True,
)
- seen_threads: Set[int] = set()
+ seen_threads: set[int] = set()
def log(msg: str) -> None:
"""Write a message to our detailed debugging log(s)."""
@@ -108,7 +105,7 @@ def arg_repr(arg: Any) -> str:
)
return repr(arg)
- def panopticon(*names: Optional[str]) -> AnyCallable:
+ def panopticon(*names: str | None) -> AnyCallable:
"""Decorate a function to log its calls."""
def _decorator(method: AnyCallable) -> AnyCallable:
@@ -145,7 +142,7 @@ def _wrapped(self: Any, *args: Any) -> Any:
def log(msg: str) -> None:
"""Write a message to our detailed debugging log(s), but not really."""
- def panopticon(*names: Optional[str]) -> AnyCallable:
+ def panopticon(*names: str | None) -> AnyCallable:
"""Decorate a function to log its calls, but not really."""
def _decorator(meth: AnyCallable) -> AnyCallable:
@@ -159,12 +156,12 @@ class CodeInfo:
"""The information we want about each code object."""
tracing: bool
- file_data: Optional[TTraceFileData]
+ file_data: TTraceFileData | None
# TODO: what is byte_to_line for?
- byte_to_line: Dict[int, int] | None
+ byte_to_line: dict[int, int] | None
-def bytes_to_lines(code: CodeType) -> Dict[int, int]:
+def bytes_to_lines(code: CodeType) -> dict[int, int]:
"""Make a dict mapping byte code offsets to line numbers."""
b2l = {}
for bstart, bend, lineno in code.co_lines():
@@ -184,24 +181,24 @@ def __init__(self, tool_id: int) -> None:
self.data: TTraceData
self.trace_arcs = False
self.should_trace: Callable[[str, FrameType], TFileDisposition]
- self.should_trace_cache: Dict[str, Optional[TFileDisposition]]
+ self.should_trace_cache: dict[str, TFileDisposition | None]
# TODO: should_start_context and switch_context are unused!
# Change tests/testenv.py:DYN_CONTEXTS when this is updated.
- self.should_start_context: Optional[Callable[[FrameType], Optional[str]]] = None
- self.switch_context: Optional[Callable[[Optional[str]], None]] = None
+ self.should_start_context: Callable[[FrameType], str | None] | None = None
+ self.switch_context: Callable[[str | None], None] | None = None
# TODO: warn is unused.
self.warn: TWarnFn
self.myid = tool_id
# Map id(code_object) -> CodeInfo
- self.code_infos: Dict[int, CodeInfo] = {}
+ self.code_infos: dict[int, CodeInfo] = {}
# A list of code_objects, just to keep them alive so that id's are
# useful as identity.
- self.code_objects: List[CodeType] = []
- self.last_lines: Dict[FrameType, int] = {}
+ self.code_objects: list[CodeType] = []
+ self.last_lines: dict[FrameType, int] = {}
# Map id(code_object) -> code_object
- self.local_event_codes: Dict[int, CodeType] = {}
+ self.local_event_codes: dict[int, CodeType] = {}
self.sysmon_on = False
self.stats = {
@@ -270,7 +267,7 @@ def reset_activity(self) -> None:
"""Reset the activity() flag."""
self._activity = False
- def get_stats(self) -> Optional[Dict[str, int]]:
+ def get_stats(self) -> dict[str, int] | None:
"""Return a dictionary of statistics, or None."""
return None
diff --git a/coverage/templite.py b/coverage/templite.py
index 843ea94eb..4e7491220 100644
--- a/coverage/templite.py
+++ b/coverage/templite.py
@@ -15,7 +15,7 @@
import re
from typing import (
- Any, Callable, Dict, List, NoReturn, Optional, Set, Union, cast,
+ Any, Callable, Dict, NoReturn, cast,
)
@@ -33,7 +33,7 @@ class CodeBuilder:
"""Build source code conveniently."""
def __init__(self, indent: int = 0) -> None:
- self.code: List[Union[str, CodeBuilder]] = []
+ self.code: list[str | CodeBuilder] = []
self.indent_level = indent
def __str__(self) -> str:
@@ -63,14 +63,14 @@ def dedent(self) -> None:
"""Decrease the current indent for following lines."""
self.indent_level -= self.INDENT_STEP
- def get_globals(self) -> Dict[str, Any]:
+ def get_globals(self) -> dict[str, Any]:
"""Execute the code, and return a dict of globals it defines."""
# A check that the caller really finished all the blocks they started.
assert self.indent_level == 0
# Get the Python source as a single string.
python_source = str(self)
# Execute the source, defining globals, and return them.
- global_namespace: Dict[str, Any] = {}
+ global_namespace: dict[str, Any] = {}
exec(python_source, global_namespace)
return global_namespace
@@ -117,7 +117,7 @@ class Templite:
})
"""
- def __init__(self, text: str, *contexts: Dict[str, Any]) -> None:
+ def __init__(self, text: str, *contexts: dict[str, Any]) -> None:
"""Construct a Templite with the given `text`.
`contexts` are dictionaries of values to use for future renderings.
@@ -128,8 +128,8 @@ def __init__(self, text: str, *contexts: Dict[str, Any]) -> None:
for context in contexts:
self.context.update(context)
- self.all_vars: Set[str] = set()
- self.loop_vars: Set[str] = set()
+ self.all_vars: set[str] = set()
+ self.loop_vars: set[str] = set()
# We construct a function in source form, then compile it and hold onto
# it, and execute it to render the template.
@@ -143,7 +143,7 @@ def __init__(self, text: str, *contexts: Dict[str, Any]) -> None:
code.add_line("extend_result = result.extend")
code.add_line("to_str = str")
- buffered: List[str] = []
+ buffered: list[str] = []
def flush_output() -> None:
"""Force `buffered` to the code builder."""
@@ -194,10 +194,7 @@ def flush_output() -> None:
ops_stack.append("for")
self._variable(words[1], self.loop_vars)
code.add_line(
- "for c_{} in {}:".format(
- words[1],
- self._expr_code(words[3]),
- ),
+ f"for c_{words[1]} in {self._expr_code(words[3])}:",
)
code.indent()
elif words[0] == "joined":
@@ -268,7 +265,7 @@ def _syntax_error(self, msg: str, thing: Any) -> NoReturn:
"""Raise a syntax error using `msg`, and showing `thing`."""
raise TempliteSyntaxError(f"{msg}: {thing!r}")
- def _variable(self, name: str, vars_set: Set[str]) -> None:
+ def _variable(self, name: str, vars_set: set[str]) -> None:
"""Track that `name` is used as a variable.
Adds the name to `vars_set`, a set of variable names.
@@ -280,7 +277,7 @@ def _variable(self, name: str, vars_set: Set[str]) -> None:
self._syntax_error("Not a valid name", name)
vars_set.add(name)
- def render(self, context: Optional[Dict[str, Any]] = None) -> str:
+ def render(self, context: dict[str, Any] | None = None) -> str:
"""Render this template by applying it to `context`.
`context` is a dictionary of values to use in this rendering.
diff --git a/coverage/tomlconfig.py b/coverage/tomlconfig.py
index bc4cfc337..1ba282d08 100644
--- a/coverage/tomlconfig.py
+++ b/coverage/tomlconfig.py
@@ -8,7 +8,7 @@
import os
import re
-from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Type, TypeVar
+from typing import Any, Callable, Iterable, TypeVar
from coverage import env
from coverage.exceptions import ConfigError
@@ -40,9 +40,9 @@ class TomlConfigParser:
def __init__(self, our_file: bool) -> None:
self.our_file = our_file
- self.data: Dict[str, Any] = {}
+ self.data: dict[str, Any] = {}
- def read(self, filenames: Iterable[str]) -> List[str]:
+ def read(self, filenames: Iterable[str]) -> list[str]:
# RawConfigParser takes a filename or list of filenames, but we only
# ever call this with a single filename.
assert isinstance(filenames, (bytes, str, os.PathLike))
@@ -67,7 +67,7 @@ def read(self, filenames: Iterable[str]) -> List[str]:
raise ConfigError(msg.format(filename))
return []
- def _get_section(self, section: str) -> Tuple[Optional[str], Optional[TConfigSectionOut]]:
+ def _get_section(self, section: str) -> tuple[str | None, TConfigSectionOut | None]:
"""Get a section from the data.
Arguments:
@@ -94,7 +94,7 @@ def _get_section(self, section: str) -> Tuple[Optional[str], Optional[TConfigSec
return None, None
return real_section, data
- def _get(self, section: str, option: str) -> Tuple[str, TConfigValueOut]:
+ def _get(self, section: str, option: str) -> tuple[str, TConfigValueOut]:
"""Like .get, but returns the real section name and the value."""
name, data = self._get_section(section)
if data is None:
@@ -123,7 +123,7 @@ def has_option(self, section: str, option: str) -> bool:
return False
return option in data
- def real_section(self, section: str) -> Optional[str]:
+ def real_section(self, section: str) -> str | None:
name, _ = self._get_section(section)
return name
@@ -131,7 +131,7 @@ def has_section(self, section: str) -> bool:
name, _ = self._get_section(section)
return bool(name)
- def options(self, section: str) -> List[str]:
+ def options(self, section: str) -> list[str]:
_, data = self._get_section(section)
if data is None:
raise ConfigError(f"No section: {section!r}")
@@ -150,8 +150,8 @@ def _check_type(
section: str,
option: str,
value: Any,
- type_: Type[TWant],
- converter: Optional[Callable[[Any], TWant]],
+ type_: type[TWant],
+ converter: Callable[[Any], TWant] | None,
type_desc: str,
) -> TWant:
"""Check that `value` has the type we want, converting if needed.
@@ -176,18 +176,18 @@ def getboolean(self, section: str, option: str) -> bool:
bool_strings = {"true": True, "false": False}
return self._check_type(name, option, value, bool, bool_strings.__getitem__, "a boolean")
- def _get_list(self, section: str, option: str) -> Tuple[str, List[str]]:
+ def _get_list(self, section: str, option: str) -> tuple[str, list[str]]:
"""Get a list of strings, substituting environment variables in the elements."""
name, values = self._get(section, option)
values = self._check_type(name, option, values, list, None, "a list")
values = [substitute_variables(value, os.environ) for value in values]
return name, values
- def getlist(self, section: str, option: str) -> List[str]:
+ def getlist(self, section: str, option: str) -> list[str]:
_, values = self._get_list(section, option)
return values
- def getregexlist(self, section: str, option: str) -> List[str]:
+ def getregexlist(self, section: str, option: str) -> list[str]:
name, values = self._get_list(section, option)
for value in values:
value = value.strip()
diff --git a/coverage/types.py b/coverage/types.py
index 60023143a..d2e0bb965 100644
--- a/coverage/types.py
+++ b/coverage/types.py
@@ -43,8 +43,8 @@ def __call__(
frame: FrameType,
event: str,
arg: Any,
- lineno: Optional[TLineNo] = None, # Our own twist, see collector.py
- ) -> Optional[TTraceFn]:
+ lineno: TLineNo | None = None, # Our own twist, see collector.py
+ ) -> TTraceFn | None:
...
## Coverage.py tracing
@@ -59,10 +59,10 @@ class TFileDisposition(Protocol):
original_filename: str
canonical_filename: str
- source_filename: Optional[str]
+ source_filename: str | None
trace: bool
reason: str
- file_tracer: Optional[FileTracer]
+ file_tracer: FileTracer | None
has_dynamic_filename: bool
@@ -84,9 +84,9 @@ class TracerCore(Protocol):
data: TTraceData
trace_arcs: bool
should_trace: Callable[[str, FrameType], TFileDisposition]
- should_trace_cache: Mapping[str, Optional[TFileDisposition]]
- should_start_context: Optional[Callable[[FrameType], Optional[str]]]
- switch_context: Optional[Callable[[Optional[str]], None]]
+ should_trace_cache: Mapping[str, TFileDisposition | None]
+ should_start_context: Callable[[FrameType], str | None] | None
+ switch_context: Callable[[str | None], None] | None
warn: TWarnFn
def __init__(self) -> None:
@@ -104,7 +104,7 @@ def activity(self) -> bool:
def reset_activity(self) -> None:
"""Reset the activity() flag."""
- def get_stats(self) -> Optional[Dict[str, int]]:
+ def get_stats(self) -> dict[str, int] | None:
"""Return a dictionary of statistics, or None."""
@@ -126,7 +126,7 @@ def get_stats(self) -> Optional[Dict[str, int]]:
class TConfigurable(Protocol):
"""Something that can proxy to the coverage configuration settings."""
- def get_option(self, option_name: str) -> Optional[TConfigValueOut]:
+ def get_option(self, option_name: str) -> TConfigValueOut | None:
"""Get an option from the configuration.
`option_name` is a colon-separated string indicating the section and
@@ -137,7 +137,7 @@ def get_option(self, option_name: str) -> Optional[TConfigValueOut]:
"""
- def set_option(self, option_name: str, value: Union[TConfigValueIn, TConfigSectionIn]) -> None:
+ def set_option(self, option_name: str, value: TConfigValueIn | TConfigSectionIn) -> None:
"""Set an option in the configuration.
`option_name` is a colon-separated string indicating the section and
@@ -173,7 +173,7 @@ class TPlugin(Protocol):
class TWarnFn(Protocol):
"""A callable warn() function."""
- def __call__(self, msg: str, slug: Optional[str] = None, once: bool = False) -> None:
+ def __call__(self, msg: str, slug: str | None = None, once: bool = False) -> None:
...
diff --git a/coverage/xmlreport.py b/coverage/xmlreport.py
index ae4393557..b346a2d78 100644
--- a/coverage/xmlreport.py
+++ b/coverage/xmlreport.py
@@ -12,7 +12,7 @@
import xml.dom.minidom
from dataclasses import dataclass
-from typing import Any, Dict, IO, Iterable, Optional, TYPE_CHECKING
+from typing import Any, IO, Iterable, TYPE_CHECKING
from coverage import __version__, files
from coverage.misc import isolate_module, human_sorted, human_sorted_items
@@ -42,7 +42,7 @@ def rate(hit: int, num: int) -> str:
@dataclass
class PackageData:
"""Data we keep about each "package" (in Java terms)."""
- elements: Dict[str, xml.dom.minidom.Element]
+ elements: dict[str, xml.dom.minidom.Element]
hits: int
lines: int
br_hits: int
@@ -72,10 +72,10 @@ def __init__(self, coverage: Coverage) -> None:
else:
src = files.canonical_filename(src)
self.source_paths.add(src)
- self.packages: Dict[str, PackageData] = {}
+ self.packages: dict[str, PackageData] = {}
self.xml_out: xml.dom.minidom.Document
- def report(self, morfs: Optional[Iterable[TMorf]], outfile: Optional[IO[str]] = None) -> float:
+ def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str] | None = None) -> float:
"""Generate a Cobertura-compatible XML report for `morfs`.
`morfs` is a list of modules or file names.
diff --git a/igor.py b/igor.py
index 56b1a01fa..6c4e9fcbf 100644
--- a/igor.py
+++ b/igor.py
@@ -155,9 +155,9 @@ def should_skip(core):
def make_env_id(core):
"""An environment id that will keep all the test runs distinct."""
impl = platform.python_implementation().lower()
- version = "%s%s" % sys.version_info[:2]
+ version = "{}{}".format(*sys.version_info[:2])
if PYPY:
- version += "_%s%s" % sys.pypy_version_info[:2]
+ version += "_{}{}".format(*sys.pypy_version_info[:2])
env_id = f"{impl}{version}_{core}"
return env_id
diff --git a/tests/conftest.py b/tests/conftest.py
index d19642030..fb2c5bd01 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -15,7 +15,7 @@
import warnings
from pathlib import Path
-from typing import Iterator, Optional
+from typing import Iterator
import pytest
@@ -120,7 +120,7 @@ def possible_pth_dirs() -> Iterator[Path]:
yield Path(sysconfig.get_path("purelib")) # pragma: cant happen
-def find_writable_pth_directory() -> Optional[Path]:
+def find_writable_pth_directory() -> Path | None:
"""Find a place to write a .pth file."""
for pth_dir in possible_pth_dirs(): # pragma: part covered
try_it = pth_dir / f"touch_{WORKER}.it"
diff --git a/tests/coveragetest.py b/tests/coveragetest.py
index 5576bf016..35734f30f 100644
--- a/tests/coveragetest.py
+++ b/tests/coveragetest.py
@@ -19,8 +19,7 @@
from types import ModuleType
from typing import (
- Any, Collection, Dict, Iterable, Iterator, List, Mapping, Optional,
- Sequence, Tuple, Union,
+ Any, Collection, Iterable, Iterator, Mapping, Sequence,
)
import coverage
@@ -68,15 +67,15 @@ def setUp(self) -> None:
super().setUp()
# Attributes for getting info about what happened.
- self.last_command_status: Optional[int] = None
- self.last_command_output: Optional[str] = None
- self.last_module_name: Optional[str] = None
+ self.last_command_status: int | None = None
+ self.last_command_output: str | None = None
+ self.last_module_name: str | None = None
def start_import_stop(
self,
cov: Coverage,
modname: str,
- modfile: Optional[str] = None,
+ modfile: str | None = None,
) -> ModuleType:
"""Start coverage, import a file, then stop coverage.
@@ -128,8 +127,8 @@ def get_module_name(self) -> str:
def _check_arcs(
self,
- a1: Optional[Iterable[TArc]],
- a2: Optional[Iterable[TArc]],
+ a1: Iterable[TArc] | None,
+ a2: Iterable[TArc] | None,
arc_type: str,
) -> str:
"""Check that the arc lists `a1` and `a2` are equal.
@@ -151,17 +150,17 @@ def _check_arcs(
def check_coverage(
self,
text: str,
- lines: Optional[Union[Sequence[TLineNo], Sequence[List[TLineNo]]]] = None,
- missing: Union[str, Sequence[str]] = "",
+ lines: Sequence[TLineNo] | Sequence[list[TLineNo]] | None = None,
+ missing: str | Sequence[str] = "",
report: str = "",
- excludes: Optional[Iterable[str]] = None,
+ excludes: Iterable[str] | None = None,
partials: Iterable[str] = (),
- arcz: Optional[str] = None,
- arcz_missing: Optional[str] = None,
- arcz_unpredicted: Optional[str] = None,
- arcs: Optional[Iterable[TArc]] = None,
- arcs_missing: Optional[Iterable[TArc]] = None,
- arcs_unpredicted: Optional[Iterable[TArc]] = None,
+ arcz: str | None = None,
+ arcz_missing: str | None = None,
+ arcz_unpredicted: str | None = None,
+ arcs: Iterable[TArc] | None = None,
+ arcs_missing: Iterable[TArc] | None = None,
+ arcs_unpredicted: Iterable[TArc] | None = None,
) -> Coverage:
"""Check the coverage measurement of `text`.
@@ -262,11 +261,11 @@ def check_coverage(
def make_data_file(
self,
- basename: Optional[str] = None,
- suffix: Optional[str] = None,
- lines: Optional[Mapping[str, Collection[TLineNo]]] = None,
- arcs: Optional[Mapping[str, Collection[TArc]]] = None,
- file_tracers: Optional[Mapping[str, str]] = None,
+ basename: str | None = None,
+ suffix: str | None = None,
+ lines: Mapping[str, Collection[TLineNo]] | None = None,
+ arcs: Mapping[str, Collection[TArc]] | None = None,
+ file_tracers: Mapping[str, str] | None = None,
) -> CoverageData:
"""Write some data into a coverage data file."""
data = coverage.CoverageData(basename=basename, suffix=suffix)
@@ -306,7 +305,7 @@ def assert_warnings(
saved_warnings = []
def capture_warning(
msg: str,
- slug: Optional[str] = None,
+ slug: str | None = None,
once: bool = False, # pylint: disable=unused-argument
) -> None:
"""A fake implementation of Coverage._warn, to capture warnings."""
@@ -368,7 +367,7 @@ def assert_recent_datetime(
self,
dt: datetime.datetime,
seconds: int = 10,
- msg: Optional[str] = None,
+ msg: str | None = None,
) -> None:
"""Assert that `dt` marks a time at most `seconds` seconds ago."""
age = datetime.datetime.now() - dt
@@ -413,7 +412,7 @@ def run_command(self, cmd: str) -> str:
_, output = self.run_command_status(cmd)
return output
- def run_command_status(self, cmd: str) -> Tuple[int, str]:
+ def run_command_status(self, cmd: str) -> tuple[int, str]:
"""Run the command-line `cmd` in a sub-process, and print its output.
Use this when you need to test the process behavior of coverage.
@@ -479,7 +478,7 @@ def report_from_command(self, cmd: str) -> str:
assert "error" not in report.lower()
return report
- def report_lines(self, report: str) -> List[str]:
+ def report_lines(self, report: str) -> list[str]:
"""Return the lines of the report, as a list."""
lines = report.split('\n')
assert lines[-1] == ""
@@ -489,7 +488,7 @@ def line_count(self, report: str) -> int:
"""How many lines are in `report`?"""
return len(self.report_lines(report))
- def squeezed_lines(self, report: str) -> List[str]:
+ def squeezed_lines(self, report: str) -> list[str]:
"""Return a list of the lines in report, with the spaces squeezed."""
lines = self.report_lines(report)
return [re.sub(r"\s+", " ", l.strip()) for l in lines]
@@ -498,7 +497,7 @@ def last_line_squeezed(self, report: str) -> str:
"""Return the last line of `report` with the spaces squeezed down."""
return self.squeezed_lines(report)[-1]
- def get_measured_filenames(self, coverage_data: CoverageData) -> Dict[str, str]:
+ def get_measured_filenames(self, coverage_data: CoverageData) -> dict[str, str]:
"""Get paths to measured files.
Returns a dict of {filename: absolute path to file}
diff --git a/tests/goldtest.py b/tests/goldtest.py
index 6cd6c32d3..0e3b3fcb4 100644
--- a/tests/goldtest.py
+++ b/tests/goldtest.py
@@ -13,7 +13,7 @@
import re
import xml.etree.ElementTree
-from typing import Iterable, List, Optional, Tuple
+from typing import Iterable
from tests.coveragetest import TESTS_DIR
from tests.helpers import os_sep
@@ -27,9 +27,9 @@ def gold_path(path: str) -> str:
def compare(
expected_dir: str,
actual_dir: str,
- file_pattern: Optional[str] = None,
+ file_pattern: str | None = None,
actual_extra: bool = False,
- scrubs: Optional[List[Tuple[str, str]]] = None,
+ scrubs: list[tuple[str, str]] | None = None,
) -> None:
"""Compare files matching `file_pattern` in `expected_dir` and `actual_dir`.
@@ -175,7 +175,7 @@ def canonicalize_xml(xtext: str) -> str:
return xml.etree.ElementTree.tostring(root).decode("utf-8")
-def _fnmatch_list(files: List[str], file_pattern: Optional[str]) -> List[str]:
+def _fnmatch_list(files: list[str], file_pattern: str | None) -> list[str]:
"""Filter the list of `files` to only those that match `file_pattern`.
If `file_pattern` is None, then return the entire list of files.
Returns a list of the filtered files.
@@ -185,7 +185,7 @@ def _fnmatch_list(files: List[str], file_pattern: Optional[str]) -> List[str]:
return files
-def scrub(strdata: str, scrubs: Iterable[Tuple[str, str]]) -> str:
+def scrub(strdata: str, scrubs: Iterable[tuple[str, str]]) -> str:
"""Scrub uninteresting data from the payload in `strdata`.
`scrubs` is a list of (find, replace) pairs of regexes that are used on
`strdata`. A string is returned.
diff --git a/tests/helpers.py b/tests/helpers.py
index 5a4f6c66a..c9cfa38c0 100644
--- a/tests/helpers.py
+++ b/tests/helpers.py
@@ -19,8 +19,7 @@
from pathlib import Path
from typing import (
- Any, Callable, Iterable, Iterator, List, NoReturn, Optional, Set, Tuple, Type,
- TypeVar, Union, cast,
+ Any, Callable, Iterable, Iterator, NoReturn, TypeVar, cast,
)
import flaky
@@ -33,7 +32,7 @@
from coverage.types import TArc, TLineNo
-def run_command(cmd: str) -> Tuple[int, str]:
+def run_command(cmd: str) -> tuple[int, str]:
"""Run a command in a sub-process.
Returns the exit status code and the combined stdout and stderr.
@@ -74,7 +73,7 @@ def make_file(
filename: str,
text: str = "",
bytes: bytes = b"",
- newline: Optional[str] = None,
+ newline: str | None = None,
) -> str:
"""Create a file for testing.
@@ -147,7 +146,7 @@ class CheckUniqueFilenames:
"""Asserts the uniqueness of file names passed to a function."""
def __init__(self, wrapped: Callable[..., Any]) -> None:
- self.filenames: Set[str] = set()
+ self.filenames: set[str] = set()
self.wrapped = wrapped
@classmethod
@@ -175,7 +174,7 @@ def wrapper(self, filename: str, *args: Any, **kwargs: Any) -> Any:
return self.wrapped(filename, *args, **kwargs)
-def re_lines(pat: str, text: str, match: bool = True) -> List[str]:
+def re_lines(pat: str, text: str, match: bool = True) -> list[str]:
"""Return a list of lines selected by `pat` in the string `text`.
If `match` is false, the selection is inverted: only the non-matching
@@ -219,7 +218,7 @@ def remove_tree(dirname: str) -> None:
_arcz_map.update({c: 10 + ord(c) - ord('A') for c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'})
-def arcz_to_arcs(arcz: str) -> List[TArc]:
+def arcz_to_arcs(arcz: str) -> list[TArc]:
"""Convert a compact textual representation of arcs to a list of pairs.
The text has space-separated pairs of letters. Period is -1, 1-9 are
@@ -270,7 +269,7 @@ def _arcs_to_arcz_repr_one(num: TLineNo) -> str:
return z
-def arcs_to_arcz_repr(arcs: Optional[Iterable[TArc]]) -> str:
+def arcs_to_arcz_repr(arcs: Iterable[TArc] | None) -> str:
"""Convert a list of arcs to a readable multi-line form for asserting.
Each pair is on its own line, with a comment showing the arcz form,
@@ -288,7 +287,7 @@ def arcs_to_arcz_repr(arcs: Optional[Iterable[TArc]]) -> str:
@contextlib.contextmanager
-def change_dir(new_dir: Union[str, Path]) -> Iterator[None]:
+def change_dir(new_dir: str | Path) -> Iterator[None]:
"""Change directory, and then change back.
Use as a context manager, it will return to the original
@@ -305,8 +304,8 @@ def change_dir(new_dir: Union[str, Path]) -> Iterator[None]:
T = TypeVar("T")
def assert_count_equal(
- a: Optional[Iterable[T]],
- b: Optional[Iterable[T]],
+ a: Iterable[T] | None,
+ b: Iterable[T] | None,
) -> None:
"""
A pytest-friendly implementation of assertCountEqual.
@@ -321,7 +320,7 @@ def assert_count_equal(
def assert_coverage_warnings(
warns: Iterable[warnings.WarningMessage],
- *msgs: Union[str, re.Pattern[str]],
+ *msgs: str | re.Pattern[str],
) -> None:
"""
Assert that the CoverageWarning's in `warns` have `msgs` as messages.
@@ -343,7 +342,7 @@ def assert_coverage_warnings(
@contextlib.contextmanager
def swallow_warnings(
message: str = r".",
- category: Type[Warning] = CoverageWarning,
+ category: type[Warning] = CoverageWarning,
) -> Iterator[None]:
"""Swallow particular warnings.
@@ -362,7 +361,7 @@ def swallow_warnings(
class FailingProxy:
"""A proxy for another object, but one method will fail a few times before working."""
- def __init__(self, obj: Any, methname: str, fails: List[Exception]) -> None:
+ def __init__(self, obj: Any, methname: str, fails: list[Exception]) -> None:
"""Create the failing proxy.
`obj` is the object to proxy. `methname` is the method that will fail
diff --git a/tests/mixins.py b/tests/mixins.py
index c8f79d675..b8bcc08e8 100644
--- a/tests/mixins.py
+++ b/tests/mixins.py
@@ -14,7 +14,7 @@
import os.path
import sys
-from typing import Any, Callable, Iterable, Iterator, Optional, Tuple, cast
+from typing import Any, Callable, Iterable, Iterator, Tuple, cast
import pytest
@@ -83,7 +83,7 @@ def make_file(
filename: str,
text: str = "",
bytes: bytes = b"",
- newline: Optional[str] = None,
+ newline: str | None = None,
) -> str:
"""Make a file. See `tests.helpers.make_file`"""
# pylint: disable=redefined-builtin # bytes
@@ -136,7 +136,7 @@ def _capcapsys(self, capsys: pytest.CaptureFixture[str]) -> None:
"""Grab the fixture so our methods can use it."""
self.capsys = capsys
- def stdouterr(self) -> Tuple[str, str]:
+ def stdouterr(self) -> tuple[str, str]:
"""Returns (out, err), two strings for stdout and stderr."""
return cast(Tuple[str, str], self.capsys.readouterr())
diff --git a/tests/plugin1.py b/tests/plugin1.py
index afaa17222..6d0b27f41 100644
--- a/tests/plugin1.py
+++ b/tests/plugin1.py
@@ -8,7 +8,7 @@
import os.path
from types import FrameType
-from typing import Any, Optional, Set, Tuple, Union
+from typing import Any
from coverage import CoveragePlugin, FileReporter, FileTracer
from coverage.plugin_support import Plugins
@@ -17,13 +17,13 @@
class Plugin(CoveragePlugin):
"""A file tracer plugin to import, so that it isn't in the test's current directory."""
- def file_tracer(self, filename: str) -> Optional[FileTracer]:
+ def file_tracer(self, filename: str) -> FileTracer | None:
"""Trace only files named xyz.py"""
if "xyz.py" in filename:
return MyFileTracer(filename)
return None
- def file_reporter(self, filename: str) -> Union[FileReporter, str]:
+ def file_reporter(self, filename: str) -> FileReporter | str:
return MyFileReporter(filename)
@@ -41,7 +41,7 @@ def __init__(self, filename: str) -> None:
def source_filename(self) -> str:
return self._source_filename
- def line_number_range(self, frame: FrameType) -> Tuple[TLineNo, TLineNo]:
+ def line_number_range(self, frame: FrameType) -> tuple[TLineNo, TLineNo]:
"""Map the line number X to X05,X06,X07."""
lineno = frame.f_lineno
return lineno*100+5, lineno*100+7
@@ -49,7 +49,7 @@ def line_number_range(self, frame: FrameType) -> Tuple[TLineNo, TLineNo]:
class MyFileReporter(FileReporter):
"""Dead-simple FileReporter."""
- def lines(self) -> Set[TLineNo]:
+ def lines(self) -> set[TLineNo]:
return {105, 106, 107, 205, 206, 207}
diff --git a/tests/plugin2.py b/tests/plugin2.py
index 5cb8fbb6f..07cce1c9f 100644
--- a/tests/plugin2.py
+++ b/tests/plugin2.py
@@ -8,7 +8,7 @@
import os.path
from types import FrameType
-from typing import Any, Optional, Set, Tuple
+from typing import Any
from coverage import CoveragePlugin, FileReporter, FileTracer
from coverage.plugin_support import Plugins
@@ -25,7 +25,7 @@
class Plugin(CoveragePlugin):
"""A file tracer plugin for testing."""
- def file_tracer(self, filename: str) -> Optional[FileTracer]:
+ def file_tracer(self, filename: str) -> FileTracer | None:
if "render.py" in filename:
return RenderFileTracer()
return None
@@ -44,20 +44,20 @@ def dynamic_source_filename(
self,
filename: str,
frame: FrameType,
- ) -> Optional[str]:
+ ) -> str | None:
if frame.f_code.co_name != "render":
return None
source_filename: str = os.path.abspath(frame.f_locals['filename'])
return source_filename
- def line_number_range(self, frame: FrameType) -> Tuple[TLineNo, TLineNo]:
+ def line_number_range(self, frame: FrameType) -> tuple[TLineNo, TLineNo]:
lineno = frame.f_locals['linenum']
return lineno, lineno+1
class MyFileReporter(FileReporter):
"""A goofy file reporter."""
- def lines(self) -> Set[TLineNo]:
+ def lines(self) -> set[TLineNo]:
# Goofy test arrangement: claim that the file has as many lines as the
# number in its name.
num = os.path.basename(self.filename).split(".")[0].split("_")[1]
diff --git a/tests/test_api.py b/tests/test_api.py
index 7e291b21d..b6ab9cda0 100644
--- a/tests/test_api.py
+++ b/tests/test_api.py
@@ -15,7 +15,7 @@
import sys
import textwrap
-from typing import cast, Callable, Dict, Iterable, List, Optional, Set
+from typing import cast, Callable, Iterable
import pytest
@@ -37,7 +37,7 @@
class ApiTest(CoverageTest):
"""Api-oriented tests for coverage.py."""
- def clean_files(self, files: List[str], pats: List[str]) -> List[str]:
+ def clean_files(self, files: list[str], pats: list[str]) -> list[str]:
"""Remove names matching `pats` from `files`, a list of file names."""
good = []
for f in files:
@@ -48,7 +48,7 @@ def clean_files(self, files: List[str], pats: List[str]) -> List[str]:
good.append(f)
return good
- def assertFiles(self, files: List[str]) -> None:
+ def assertFiles(self, files: list[str]) -> None:
"""Assert that the files here are `files`, ignoring the usual junk."""
here = os.listdir(".")
here = self.clean_files(here, ["*.pyc", "__pycache__", "*$py.class"])
@@ -491,7 +491,7 @@ def make_files() -> None:
},
)
- def get_combined_filenames() -> Set[str]:
+ def get_combined_filenames() -> set[str]:
cov = coverage.Coverage()
cov.combine()
assert self.stdout() == ""
@@ -748,7 +748,7 @@ class CurrentInstanceTest(CoverageTest):
run_in_temp_dir = False
- def assert_current_is_none(self, current: Optional[Coverage]) -> None:
+ def assert_current_is_none(self, current: Coverage | None) -> None:
"""Assert that a current we expect to be None is correct."""
# During meta-coverage, the None answers will be wrong because the
# overall coverage measurement will still be on the current-stack.
@@ -878,7 +878,7 @@ def setUp(self) -> None:
)
sys.path.insert(0, abs_file("tests_dir_modules"))
- def coverage_usepkgs_counts(self, **kwargs: TCovKwargs) -> Dict[str, int]:
+ def coverage_usepkgs_counts(self, **kwargs: TCovKwargs) -> dict[str, int]:
"""Run coverage on usepkgs and return a line summary.
Arguments are passed to the `coverage.Coverage` constructor.
diff --git a/tests/test_arcs.py b/tests/test_arcs.py
index 1c1437fbe..b544b3ca3 100644
--- a/tests/test_arcs.py
+++ b/tests/test_arcs.py
@@ -49,7 +49,7 @@ def test_simple_sequence(self) -> None:
c = 5
""",
- arcz="-{0}2 23 35 5-{0}".format(line1),
+ arcz=f"-{line1}2 23 35 5-{line1}",
)
def test_function_def(self) -> None:
@@ -477,7 +477,7 @@ def method(self):
num_stmts = 3
else:
num_stmts = 2
- expected = "zero.py {n} {n} 0 0 0% 1-3".format(n=num_stmts)
+ expected = f"zero.py {num_stmts} {num_stmts} 0 0 0% 1-3"
report = self.get_report(cov, show_missing=True)
squeezed = self.squeezed_lines(report)
assert expected in squeezed[3]
@@ -1637,10 +1637,10 @@ def test_pathologically_long_code_object(self, n: int) -> None:
# line-number packing.
code = """\
data = [
- """ + "".join("""\
+ """ + "".join(f"""\
[
{i}, {i}, {i}, {i}, {i}, {i}, {i}, {i}, {i}, {i}],
- """.format(i=i) for i in range(n)
+ """ for i in range(n)
) + """\
]
diff --git a/tests/test_cmdline.py b/tests/test_cmdline.py
index f24ddf88c..f832fcc94 100644
--- a/tests/test_cmdline.py
+++ b/tests/test_cmdline.py
@@ -13,7 +13,7 @@
import textwrap
from unittest import mock
-from typing import Any, List, Mapping, Optional, Tuple
+from typing import Any, Mapping
import pytest
@@ -99,8 +99,8 @@ def model_object(self) -> mock.Mock:
def mock_command_line(
self,
args: str,
- options: Optional[Mapping[str, TConfigValueIn]] = None,
- ) -> Tuple[mock.Mock, int]:
+ options: Mapping[str, TConfigValueIn] | None = None,
+ ) -> tuple[mock.Mock, int]:
"""Run `args` through the command line, with a Mock.
`options` is a dict of names and values to pass to `set_option`.
@@ -133,7 +133,7 @@ def cmd_executes(
args: str,
code: str,
ret: int = OK,
- options: Optional[Mapping[str, TConfigValueIn]] = None,
+ options: Mapping[str, TConfigValueIn] | None = None,
) -> None:
"""Assert that the `args` end up executing the sequence in `code`."""
called, status = self.mock_command_line(args, options=options)
@@ -176,8 +176,8 @@ def assert_same_mock_calls(self, m1: mock.Mock, m2: mock.Mock) -> None:
def cmd_help(
self,
args: str,
- help_msg: Optional[str] = None,
- topic: Optional[str] = None,
+ help_msg: str | None = None,
+ topic: str | None = None,
ret: int = ERR,
) -> None:
"""Run a command line, and check that it prints the right help.
@@ -1085,7 +1085,7 @@ class CmdMainTest(CoverageTest):
class CoverageScriptStub:
"""A stub for coverage.cmdline.CoverageScript, used by CmdMainTest."""
- def command_line(self, argv: List[str]) -> int:
+ def command_line(self, argv: list[str]) -> int:
"""Stub for command_line, the arg determines what it will do."""
if argv[0] == 'hello':
print("Hello, world!")
@@ -1207,8 +1207,8 @@ class FailUnderTest(CoverageTest):
])
def test_fail_under(
self,
- results: Tuple[float, float, float, float, float],
- fail_under: Optional[float],
+ results: tuple[float, float, float, float, float],
+ fail_under: float | None,
cmd: str,
ret: int,
) -> None:
diff --git a/tests/test_concurrency.py b/tests/test_concurrency.py
index ae54f66df..94728641a 100644
--- a/tests/test_concurrency.py
+++ b/tests/test_concurrency.py
@@ -16,7 +16,7 @@
import time
from types import ModuleType
-from typing import Iterable, Optional
+from typing import Iterable
from flaky import flaky
import pytest
@@ -176,7 +176,7 @@ def sum_range(limit):
"""
-def cant_trace_msg(concurrency: str, the_module: Optional[ModuleType]) -> Optional[str]:
+def cant_trace_msg(concurrency: str, the_module: ModuleType | None) -> str | None:
"""What might coverage.py say about a concurrency setting and imported module?"""
# In the concurrency choices, "multiprocessing" doesn't count, so remove it.
if "multiprocessing" in concurrency:
@@ -209,7 +209,7 @@ def try_some_code(
code: str,
concurrency: str,
the_module: ModuleType,
- expected_out: Optional[str] = None,
+ expected_out: str | None = None,
) -> None:
"""Run some concurrency testing code and see that it was all covered.
@@ -459,7 +459,7 @@ class MultiprocessingTest(CoverageTest):
def try_multiprocessing_code(
self,
code: str,
- expected_out: Optional[str],
+ expected_out: str | None,
the_module: ModuleType,
nprocs: int,
start_method: str,
diff --git a/tests/test_context.py b/tests/test_context.py
index d3803ace3..616a3d609 100644
--- a/tests/test_context.py
+++ b/tests/test_context.py
@@ -8,7 +8,7 @@
import inspect
import os.path
-from typing import Any, List, Optional, Tuple
+from typing import Any
from unittest import mock
import pytest
@@ -50,7 +50,7 @@ def test_static_context(self) -> None:
LINES = [1, 2, 4]
ARCS = [(-1, 1), (1, 2), (2, 4), (4, -1)]
- def run_red_blue(self, **options: TCovKwargs) -> Tuple[CoverageData, CoverageData]:
+ def run_red_blue(self, **options: TCovKwargs) -> tuple[CoverageData, CoverageData]:
"""Run red.py and blue.py, and return their CoverageData objects."""
self.make_file("red.py", self.SOURCE)
red_cov = coverage.Coverage(context="red", data_suffix="r", source=["."], **options)
@@ -81,7 +81,7 @@ def test_combining_line_contexts(self) -> None:
fred = full_names['red.py']
fblue = full_names['blue.py']
- def assert_combined_lines(filename: str, context: str, lines: List[TLineNo]) -> None:
+ def assert_combined_lines(filename: str, context: str, lines: list[TLineNo]) -> None:
# pylint: disable=cell-var-from-loop
combined.set_query_context(context)
assert combined.lines(filename) == lines
@@ -106,7 +106,7 @@ def test_combining_arc_contexts(self) -> None:
fred = full_names['red.py']
fblue = full_names['blue.py']
- def assert_combined_lines(filename: str, context: str, lines: List[TLineNo]) -> None:
+ def assert_combined_lines(filename: str, context: str, lines: list[TLineNo]) -> None:
# pylint: disable=cell-var-from-loop
combined.set_query_context(context)
assert combined.lines(filename) == lines
@@ -116,7 +116,7 @@ def assert_combined_lines(filename: str, context: str, lines: List[TLineNo]) ->
assert_combined_lines(fblue, 'red', [])
assert_combined_lines(fblue, 'blue', self.LINES)
- def assert_combined_arcs(filename: str, context: str, lines: List[TArc]) -> None:
+ def assert_combined_arcs(filename: str, context: str, lines: list[TArc]) -> None:
# pylint: disable=cell-var-from-loop
combined.set_query_context(context)
assert combined.arcs(filename) == lines
@@ -172,7 +172,7 @@ def test_dynamic_alone(self) -> None:
["", "two_tests.test_one", "two_tests.test_two"],
)
- def assert_context_lines(context: str, lines: List[TLineNo]) -> None:
+ def assert_context_lines(context: str, lines: list[TLineNo]) -> None:
data.set_query_context(context)
assert_count_equal(lines, sorted_lines(data, fname))
@@ -194,7 +194,7 @@ def test_static_and_dynamic(self) -> None:
["stat", "stat|two_tests.test_one", "stat|two_tests.test_two"],
)
- def assert_context_lines(context: str, lines: List[TLineNo]) -> None:
+ def assert_context_lines(context: str, lines: list[TLineNo]) -> None:
data.set_query_context(context)
assert_count_equal(lines, sorted_lines(data, fname))
@@ -203,7 +203,7 @@ def assert_context_lines(context: str, lines: List[TLineNo]) -> None:
assert_context_lines("stat|two_tests.test_two", self.TEST_TWO_LINES)
-def get_qualname() -> Optional[str]:
+def get_qualname() -> str | None:
"""Helper to return qualname_from_frame for the caller."""
stack = inspect.stack()[1:]
if any(sinfo[0].f_code.co_name == "get_qualname" for sinfo in stack):
@@ -216,11 +216,11 @@ def get_qualname() -> Optional[str]:
# pylint: disable=missing-class-docstring, missing-function-docstring, unused-argument
class Parent:
- def meth(self) -> Optional[str]:
+ def meth(self) -> str | None:
return get_qualname()
@property
- def a_property(self) -> Optional[str]:
+ def a_property(self) -> str | None:
return get_qualname()
class Child(Parent):
@@ -232,16 +232,16 @@ class SomethingElse:
class MultiChild(SomethingElse, Child):
pass
-def no_arguments() -> Optional[str]:
+def no_arguments() -> str | None:
return get_qualname()
-def plain_old_function(a: Any, b: Any) -> Optional[str]:
+def plain_old_function(a: Any, b: Any) -> str | None:
return get_qualname()
-def fake_out(self: Any) -> Optional[str]:
+def fake_out(self: Any) -> str | None:
return get_qualname()
-def patch_meth(self: Any) -> Optional[str]:
+def patch_meth(self: Any) -> str | None:
return get_qualname()
# pylint: enable=missing-class-docstring, missing-function-docstring, unused-argument
diff --git a/tests/test_data.py b/tests/test_data.py
index 76dad3c4e..1f0bb20dc 100644
--- a/tests/test_data.py
+++ b/tests/test_data.py
@@ -13,7 +13,7 @@
import threading
from typing import (
- Any, Callable, Collection, Dict, Iterable, Mapping, Set, TypeVar, Union,
+ Any, Callable, Collection, Iterable, Mapping, TypeVar, Union,
)
from unittest import mock
@@ -120,7 +120,7 @@ def assert_arcs3_data(covdata: CoverageData) -> None:
TData = TypeVar("TData", bound=Union[TLineNo, TArc])
-def dicts_from_sets(file_data: Dict[str, Set[TData]]) -> Dict[str, Dict[TData, None]]:
+def dicts_from_sets(file_data: dict[str, set[TData]]) -> dict[str, dict[TData, None]]:
"""Convert a dict of sets into a dict of dicts.
Before 6.0, file data was a dict with None as the values. In 6.0, file
diff --git a/tests/test_files.py b/tests/test_files.py
index d08b9a78d..aac9c4279 100644
--- a/tests/test_files.py
+++ b/tests/test_files.py
@@ -10,7 +10,7 @@
import os.path
import re
-from typing import Any, Iterable, Iterator, List, Protocol
+from typing import Any, Iterable, Iterator, Protocol
from unittest import mock
import pytest
@@ -464,7 +464,7 @@ def test_relative_pattern(self) -> None:
def test_multiple_patterns(self, rel_yn: bool) -> None:
# also test the debugfn...
- msgs: List[str] = []
+ msgs: list[str] = []
aliases = PathAliases(debugfn=msgs.append, relative=rel_yn)
aliases.add('/home/*/src', './mysrc')
aliases.add('/lib/*/libsrc', './mylib')
diff --git a/tests/test_html.py b/tests/test_html.py
index b143d70a6..cab0bae48 100644
--- a/tests/test_html.py
+++ b/tests/test_html.py
@@ -15,7 +15,7 @@
import sys
from unittest import mock
-from typing import Any, Dict, IO, List, Optional, Set, Tuple
+from typing import Any, IO
import pytest
@@ -56,8 +56,8 @@ def func2(x):
def run_coverage(
self,
- covargs: Optional[Dict[str, Any]] = None,
- htmlargs: Optional[Dict[str, Any]] = None,
+ covargs: dict[str, Any] | None = None,
+ htmlargs: dict[str, Any] | None = None,
) -> float:
"""Run coverage.py on main_file.py, and create an HTML report."""
self.clean_local_file_imports()
@@ -134,7 +134,7 @@ def assert_valid_hrefs(self) -> None:
class FileWriteTracker:
"""A fake object to track how `open` is used to write files."""
- def __init__(self, written: Set[str]) -> None:
+ def __init__(self, written: set[str]) -> None:
self.written = written
def open(self, filename: str, mode: str = "r") -> IO[str]:
@@ -155,12 +155,12 @@ def setUp(self) -> None:
self.real_coverage_version = coverage.__version__
self.addCleanup(setattr, coverage, "__version__", self.real_coverage_version)
- self.files_written: Set[str]
+ self.files_written: set[str]
def run_coverage(
self,
- covargs: Optional[Dict[str, Any]] = None,
- htmlargs: Optional[Dict[str, Any]] = None,
+ covargs: dict[str, Any] | None = None,
+ htmlargs: dict[str, Any] | None = None,
) -> float:
"""Run coverage in-process for the delta tests.
@@ -659,7 +659,7 @@ def filepath_to_regex(path: str) -> str:
def compare_html(
expected: str,
actual: str,
- extra_scrubs: Optional[List[Tuple[str, str]]] = None,
+ extra_scrubs: list[tuple[str, str]] | None = None,
) -> None:
"""Specialized compare function for our HTML files."""
__tracebackhide__ = True # pytest, please don't show me this function.
@@ -1208,7 +1208,7 @@ def test_filtered_dynamic_contexts(self) -> None:
d = self.html_data_from_cov(cov, mod)
context_labels = [self.EMPTY, 'two_tests.test_one', 'two_tests.test_two']
- expected_lines: List[List[TLineNo]] = [[], self.TEST_ONE_LINES, []]
+ expected_lines: list[list[TLineNo]] = [[], self.TEST_ONE_LINES, []]
for label, expected in zip(context_labels, expected_lines):
actual = [ld.number for ld in d.lines if label in (ld.contexts or ())]
assert sorted(expected) == sorted(actual)
diff --git a/tests/test_json.py b/tests/test_json.py
index b51d0b54a..c5ef71cb8 100644
--- a/tests/test_json.py
+++ b/tests/test_json.py
@@ -9,7 +9,7 @@
import os
from datetime import datetime
-from typing import Any, Dict
+from typing import Any
import coverage
from coverage import Coverage
@@ -24,7 +24,7 @@ class JsonReportTest(UsingModulesMixin, CoverageTest):
def _assert_expected_json_report(
self,
cov: Coverage,
- expected_result: Dict[str, Any],
+ expected_result: dict[str, Any],
) -> None:
"""
Helper that handles common ceremonies so tests can clearly show the
@@ -142,16 +142,16 @@ def test_simple_line_coverage(self) -> None:
def run_context_test(self, relative_files: bool) -> None:
"""A helper for two tests below."""
- self.make_file("config", """\
+ self.make_file("config", f"""\
[run]
- relative_files = {}
+ relative_files = {relative_files}
[report]
precision = 2
[json]
show_contexts = True
- """.format(relative_files))
+ """)
cov = coverage.Coverage(context="cool_test", config_file="config")
expected_result = {
'meta': {
diff --git a/tests/test_numbits.py b/tests/test_numbits.py
index f921dee47..44a264197 100644
--- a/tests/test_numbits.py
+++ b/tests/test_numbits.py
@@ -8,7 +8,7 @@
import json
import sqlite3
-from typing import Iterable, Set
+from typing import Iterable
from hypothesis import example, given, settings
from hypothesis.strategies import sets, integers
@@ -54,7 +54,7 @@ def test_conversion(self, nums: Iterable[int]) -> None:
@given(line_number_sets, line_number_sets)
@settings(default_settings)
- def test_union(self, nums1: Set[int], nums2: Set[int]) -> None:
+ def test_union(self, nums1: set[int], nums2: set[int]) -> None:
nb1 = nums_to_numbits(nums1)
good_numbits(nb1)
nb2 = nums_to_numbits(nums2)
@@ -66,7 +66,7 @@ def test_union(self, nums1: Set[int], nums2: Set[int]) -> None:
@given(line_number_sets, line_number_sets)
@settings(default_settings)
- def test_intersection(self, nums1: Set[int], nums2: Set[int]) -> None:
+ def test_intersection(self, nums1: set[int], nums2: set[int]) -> None:
nb1 = nums_to_numbits(nums1)
good_numbits(nb1)
nb2 = nums_to_numbits(nums2)
@@ -78,7 +78,7 @@ def test_intersection(self, nums1: Set[int], nums2: Set[int]) -> None:
@given(line_number_sets, line_number_sets)
@settings(default_settings)
- def test_any_intersection(self, nums1: Set[int], nums2: Set[int]) -> None:
+ def test_any_intersection(self, nums1: set[int], nums2: set[int]) -> None:
nb1 = nums_to_numbits(nums1)
good_numbits(nb1)
nb2 = nums_to_numbits(nums2)
diff --git a/tests/test_plugins.py b/tests/test_plugins.py
index 39390ee35..0fd8cd031 100644
--- a/tests/test_plugins.py
+++ b/tests/test_plugins.py
@@ -10,7 +10,7 @@
import math
import os.path
-from typing import Any, Dict, List, Optional
+from typing import Any
from xml.etree import ElementTree
import pytest
@@ -39,10 +39,10 @@ def get_plugin_options(self, plugin: str) -> TConfigSectionOut:
class FakeConfig(TPluginConfig):
"""A fake config for use in tests."""
- def __init__(self, plugin: str, options: Dict[str, Any]) -> None:
+ def __init__(self, plugin: str, options: dict[str, Any]) -> None:
self.plugin = plugin
self.options = options
- self.asked_for: List[str] = []
+ self.asked_for: list[str] = []
def get_plugin_options(self, plugin: str) -> TConfigSectionOut:
"""Just return the options for `plugin` if this is the right module."""
@@ -631,8 +631,8 @@ def run_bad_plugin(
module_name: str,
plugin_name: str,
our_error: bool = True,
- excmsg: Optional[str] = None,
- excmsgs: Optional[List[str]] = None,
+ excmsg: str | None = None,
+ excmsgs: list[str] | None = None,
) -> None:
"""Run a file, and see that the plugin failed.
@@ -1125,7 +1125,7 @@ def test_plugin_with_test_function(self) -> None:
]
assert expected == sorted(data.measured_contexts())
- def assert_context_lines(context: str, lines: List[TLineNo]) -> None:
+ def assert_context_lines(context: str, lines: list[TLineNo]) -> None:
data.set_query_context(context)
assert lines == sorted_lines(data, filenames['rendering.py'])
@@ -1163,7 +1163,7 @@ def test_multiple_plugins(self) -> None:
]
assert expected == sorted(data.measured_contexts())
- def assert_context_lines(context: str, lines: List[TLineNo]) -> None:
+ def assert_context_lines(context: str, lines: list[TLineNo]) -> None:
data.set_query_context(context)
assert lines == sorted_lines(data, filenames['rendering.py'])
diff --git a/tests/test_process.py b/tests/test_process.py
index 22180b2f2..27d01c0f0 100644
--- a/tests/test_process.py
+++ b/tests/test_process.py
@@ -815,7 +815,7 @@ def test_coverage_custom_script(self) -> None:
SOMETHING = "hello-xyzzy"
""")
abc = os.path.abspath("a/b/c")
- self.make_file("run_coverage.py", """\
+ self.make_file("run_coverage.py", f"""\
import sys
sys.path[0:0] = [
r'{abc}',
@@ -826,7 +826,7 @@ def test_coverage_custom_script(self) -> None:
if __name__ == '__main__':
sys.exit(coverage.cmdline.main())
- """.format(abc=abc))
+ """)
self.make_file("how_is_it.py", """\
import pprint, sys
pprint.pprint(sys.path)
diff --git a/tests/test_report.py b/tests/test_report.py
index 37b24ab69..37850cb20 100644
--- a/tests/test_report.py
+++ b/tests/test_report.py
@@ -13,7 +13,6 @@
import py_compile
import re
-from typing import Tuple
import pytest
@@ -994,7 +993,7 @@ def make_rigged_file(self, filename: str, stmts: int, miss: int) -> None:
source += " a = 2\n" * dont_run
self.make_file(filename, source)
- def get_summary_text(self, *options: Tuple[str, TConfigValueIn]) -> str:
+ def get_summary_text(self, *options: tuple[str, TConfigValueIn]) -> str:
"""Get text output from the SummaryReporter.
The arguments are tuples: (name, value) for Coverage.set_option.
diff --git a/tests/test_report_core.py b/tests/test_report_core.py
index 77e234b66..a27568152 100644
--- a/tests/test_report_core.py
+++ b/tests/test_report_core.py
@@ -5,7 +5,7 @@
from __future__ import annotations
-from typing import IO, Iterable, List, Optional, Type
+from typing import IO, Iterable
import pytest
@@ -21,12 +21,12 @@ class FakeReporter:
report_type = "fake report file"
- def __init__(self, output: str = "", error: Optional[Type[Exception]] = None) -> None:
+ def __init__(self, output: str = "", error: type[Exception] | None = None) -> None:
self.output = output
self.error = error
- self.morfs: Optional[Iterable[TMorf]] = None
+ self.morfs: Iterable[TMorf] | None = None
- def report(self, morfs: Optional[Iterable[TMorf]], outfile: IO[str]) -> float:
+ def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str]) -> float:
"""Fake."""
self.morfs = morfs
outfile.write(self.output)
@@ -40,7 +40,7 @@ class RenderReportTest(CoverageTest):
def test_stdout(self) -> None:
fake = FakeReporter(output="Hello!\n")
- msgs: List[str] = []
+ msgs: list[str] = []
res = render_report("-", fake, [pytest, "coverage"], msgs.append)
assert res == 17.25
assert fake.morfs == [pytest, "coverage"]
@@ -49,7 +49,7 @@ def test_stdout(self) -> None:
def test_file(self) -> None:
fake = FakeReporter(output="Gréètings!\n")
- msgs: List[str] = []
+ msgs: list[str] = []
res = render_report("output.txt", fake, [], msgs.append)
assert res == 17.25
assert self.stdout() == ""
@@ -58,9 +58,9 @@ def test_file(self) -> None:
assert msgs == ["Wrote fake report file to output.txt"]
@pytest.mark.parametrize("error", [CoverageException, ZeroDivisionError])
- def test_exception(self, error: Type[Exception]) -> None:
+ def test_exception(self, error: type[Exception]) -> None:
fake = FakeReporter(error=error)
- msgs: List[str] = []
+ msgs: list[str] = []
with pytest.raises(error, match="You asked for it!"):
render_report("output.txt", fake, [], msgs.append)
assert self.stdout() == ""
diff --git a/tests/test_results.py b/tests/test_results.py
index 5afddc523..e05824033 100644
--- a/tests/test_results.py
+++ b/tests/test_results.py
@@ -7,7 +7,7 @@
import math
-from typing import Dict, Iterable, List, Tuple, cast
+from typing import Iterable, cast
import pytest
@@ -60,7 +60,7 @@ def test_sum(self) -> None:
(dict(precision=1, n_files=1, n_statements=10000, n_missing=9999), "0.1"),
(dict(precision=1, n_files=1, n_statements=10000, n_missing=10000), "0.0"),
])
- def test_pc_covered_str(self, kwargs: Dict[str, int], res: str) -> None:
+ def test_pc_covered_str(self, kwargs: dict[str, int], res: str) -> None:
assert Numbers(**kwargs).pc_covered_str == res
@pytest.mark.parametrize("prec, pc, res", [
@@ -165,7 +165,7 @@ def test_format_lines(
def test_format_lines_with_arcs(
statements: Iterable[TLineNo],
lines: Iterable[TLineNo],
- arcs: Iterable[Tuple[TLineNo, List[TLineNo]]],
+ arcs: Iterable[tuple[TLineNo, list[TLineNo]]],
result: str,
) -> None:
assert format_lines(statements, lines, arcs) == result
diff --git a/tests/test_templite.py b/tests/test_templite.py
index 0ca9b3ccd..3484f71df 100644
--- a/tests/test_templite.py
+++ b/tests/test_templite.py
@@ -8,7 +8,7 @@
import re
from types import SimpleNamespace
-from typing import Any, ContextManager, Dict, List, Optional
+from typing import Any, ContextManager
import pytest
@@ -27,8 +27,8 @@ class TempliteTest(CoverageTest):
def try_render(
self,
text: str,
- ctx: Optional[Dict[str, Any]] = None,
- result: Optional[str] = None,
+ ctx: dict[str, Any] | None = None,
+ result: str | None = None,
) -> None:
"""Render `text` through `ctx`, and it had better be `result`.
@@ -120,7 +120,7 @@ def test_loops(self) -> None:
"Look: 1, 2, 3, 4, done.",
)
# Loop iterables can be filtered.
- def rev(l: List[int]) -> List[int]:
+ def rev(l: list[int]) -> list[int]:
"""Return the reverse of `l`."""
l = l[:]
l.reverse()
diff --git a/tests/test_testing.py b/tests/test_testing.py
index 5c60a08a6..933384b4d 100644
--- a/tests/test_testing.py
+++ b/tests/test_testing.py
@@ -11,7 +11,6 @@
import sys
import warnings
-from typing import List, Tuple
import pytest
@@ -233,7 +232,7 @@ def method(
filename: str,
a: int = 17,
b: str = "hello",
- ) -> Tuple[int, str, int, str]:
+ ) -> tuple[int, str, int, str]:
"""The method we'll wrap, with args to be sure args work."""
return (self.x, filename, a, b)
@@ -376,7 +375,7 @@ class ArczTest(CoverageTest):
("-11 12 2-5", [(-1, 1), (1, 2), (2, -5)]),
("-QA CB IT Z-A", [(-26, 10), (12, 11), (18, 29), (35, -10)]),
])
- def test_arcz_to_arcs(self, arcz: str, arcs: List[TArc]) -> None:
+ def test_arcz_to_arcs(self, arcz: str, arcs: list[TArc]) -> None:
assert arcz_to_arcs(arcz) == arcs
@pytest.mark.parametrize("arcs, arcz_repr", [
@@ -393,7 +392,7 @@ def test_arcz_to_arcs(self, arcz: str, arcs: List[TArc]) -> None:
),
),
])
- def test_arcs_to_arcz_repr(self, arcs: List[TArc], arcz_repr: str) -> None:
+ def test_arcs_to_arcz_repr(self, arcs: list[TArc], arcz_repr: str) -> None:
assert arcs_to_arcz_repr(arcs) == arcz_repr
diff --git a/tests/test_xml.py b/tests/test_xml.py
index 0b1ddd58b..ad915380a 100644
--- a/tests/test_xml.py
+++ b/tests/test_xml.py
@@ -9,7 +9,7 @@
import os.path
import re
-from typing import Any, Dict, Iterator, Tuple, Union
+from typing import Any, Iterator
from xml.etree import ElementTree
import pytest
@@ -64,7 +64,7 @@ def here(p: str) -> str:
def assert_source(
self,
- xmldom: Union[ElementTree.Element, ElementTree.ElementTree],
+ xmldom: ElementTree.Element | ElementTree.ElementTree,
src: str,
) -> None:
"""Assert that the XML has a <source> element with `src`."""
@@ -379,7 +379,7 @@ def unbackslash(v: Any) -> Any:
class XmlPackageStructureTest(XmlTestHelpers, CoverageTest):
"""Tests about the package structure reported in the coverage.xml file."""
- def package_and_class_tags(self, cov: Coverage) -> Iterator[Tuple[str, Dict[str, Any]]]:
+ def package_and_class_tags(self, cov: Coverage) -> Iterator[tuple[str, dict[str, Any]]]:
"""Run an XML report on `cov`, and get the package and class tags."""
cov.xml_report()
dom = ElementTree.parse("coverage.xml")
From fc5ef3e38a74b0ceb16f584176b7f836cfe37021 Mon Sep 17 00:00:00 2001
From: Ned Batchelder
Date: Tue, 27 Feb 2024 16:19:26 -0500
Subject: [PATCH 05/24] build: we can ignore style fixes for git blame
---
.git-blame-ignore-revs | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
index 7ebcb5d52..b985e23eb 100644
--- a/.git-blame-ignore-revs
+++ b/.git-blame-ignore-revs
@@ -21,3 +21,9 @@ d8daa08b347fe6b7099c437b09d926eb999d0803
# 2023-12-02 style: check_coverage close parens should be on their own line
5d0b5d4464b84adb6389c8894c207a323edb2b2b
+
+# 2024-02-27 style: fix COM812 Trailing comma missing
+e4e238a9ed8f2ad2b9060247591b4c057c2953bf
+
+# 2024-02-27 style: modernize type hints, a few more f-strings
+401a63bf08bdfd780b662f64d2dfe3603f2584dd
From f632c28c0d8c2a14a897f56a76d987f5b38a2200 Mon Sep 17 00:00:00 2001
From: Ned Batchelder
Date: Wed, 28 Feb 2024 14:57:16 -0500
Subject: [PATCH 06/24] chore: make upgrade
---
coverage/tracer.pyi | 4 ++++
requirements/dev.pip | 30 +++++++++++++++---------------
requirements/kit.pip | 4 ++--
requirements/light-threads.pip | 4 ++--
requirements/mypy.pip | 6 +++---
requirements/pip-tools.pip | 2 +-
requirements/pip.pip | 4 ++--
requirements/pytest.pip | 4 ++--
requirements/tox.pip | 4 ++--
9 files changed, 33 insertions(+), 29 deletions(-)
diff --git a/coverage/tracer.pyi b/coverage/tracer.pyi
index 14372d1e3..c5f1c2840 100644
--- a/coverage/tracer.pyi
+++ b/coverage/tracer.pyi
@@ -1,11 +1,14 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
+"""Typing information for the constructs from our .c files."""
+
from typing import Any, Dict
from coverage.types import TFileDisposition, TTraceData, TTraceFn, TracerCore
class CFileDisposition(TFileDisposition):
+ """CFileDisposition is in ctracer/filedisp.c"""
canonical_filename: Any
file_tracer: Any
has_dynamic_filename: Any
@@ -16,6 +19,7 @@ class CFileDisposition(TFileDisposition):
def __init__(self) -> None: ...
class CTracer(TracerCore):
+ """CTracer is in ctracer/tracer.c"""
check_include: Any
concur_id_func: Any
data: TTraceData
diff --git a/requirements/dev.pip b/requirements/dev.pip
index 195904cf8..0c57bf0a4 100644
--- a/requirements/dev.pip
+++ b/requirements/dev.pip
@@ -4,13 +4,13 @@
#
# make upgrade
#
-astroid==3.0.3
+astroid==3.1.0
# via pylint
attrs==23.2.0
# via hypothesis
build==1.0.3
# via check-manifest
-cachetools==5.3.2
+cachetools==5.3.3
# via tox
certifi==2024.2.2
# via requests
@@ -47,7 +47,7 @@ flaky==3.7.0
# via -r requirements/pytest.in
greenlet==3.0.3
# via -r requirements/dev.in
-hypothesis==6.98.9
+hypothesis==6.98.13
# via -r requirements/pytest.in
idna==3.6
# via requests
@@ -56,7 +56,7 @@ importlib-metadata==7.0.1
# build
# keyring
# twine
-importlib-resources==6.1.1
+importlib-resources==6.1.2
# via keyring
iniconfig==2.0.0
# via pytest
@@ -66,7 +66,7 @@ jaraco-classes==3.3.1
# via keyring
jedi==0.19.1
# via pudb
-keyring==24.3.0
+keyring==24.3.1
# via twine
libsass==0.23.0
# via -r requirements/dev.in
@@ -108,19 +108,19 @@ pygments==2.17.2
# pudb
# readme-renderer
# rich
-pylint==3.0.3
+pylint==3.1.0
# via -r requirements/dev.in
pyproject-api==1.6.1
# via tox
pyproject-hooks==1.0.0
# via build
-pytest==8.0.1
+pytest==8.0.2
# via
# -r requirements/pytest.in
# pytest-xdist
pytest-xdist==3.5.0
# via -r requirements/pytest.in
-readme-renderer==42.0
+readme-renderer==43.0
# via
# -r requirements/dev.in
# twine
@@ -133,7 +133,7 @@ requests-toolbelt==1.0.0
# via twine
rfc3986==2.0.0
# via twine
-rich==13.7.0
+rich==13.7.1
# via twine
sortedcontainers==2.4.0
# via hypothesis
@@ -148,7 +148,7 @@ tomli==2.0.1
# pyproject-hooks
# pytest
# tox
-tomlkit==0.12.3
+tomlkit==0.12.4
# via pylint
tox==4.13.0
# via
@@ -158,7 +158,7 @@ tox-gh==1.3.1
# via -r requirements/tox.in
twine==5.0.0
# via -r requirements/dev.in
-typing-extensions==4.9.0
+typing-extensions==4.10.0
# via
# astroid
# pylint
@@ -168,13 +168,13 @@ urllib3==2.2.1
# via
# requests
# twine
-urwid==2.6.1
+urwid==2.6.7
# via
# pudb
# urwid-readline
-urwid-readline==0.13
+urwid-readline==0.14
# via pudb
-virtualenv==20.25.0
+virtualenv==20.25.1
# via
# -r requirements/pip.in
# tox
@@ -188,7 +188,7 @@ zipp==3.17.0
# The following packages are considered to be unsafe in a requirements file:
pip==24.0
# via -r requirements/pip.in
-setuptools==69.1.0
+setuptools==69.1.1
# via
# -r requirements/pip.in
# check-manifest
diff --git a/requirements/kit.pip b/requirements/kit.pip
index 6178060c8..d4b09e1d2 100644
--- a/requirements/kit.pip
+++ b/requirements/kit.pip
@@ -38,7 +38,7 @@ tomli==2.0.1
# build
# cibuildwheel
# pyproject-hooks
-typing-extensions==4.9.0
+typing-extensions==4.10.0
# via cibuildwheel
wheel==0.42.0
# via -r requirements/kit.in
@@ -46,5 +46,5 @@ zipp==3.17.0
# via importlib-metadata
# The following packages are considered to be unsafe in a requirements file:
-setuptools==69.1.0
+setuptools==69.1.1
# via -r requirements/kit.in
diff --git a/requirements/light-threads.pip b/requirements/light-threads.pip
index 25e58cb4f..884b7c4ae 100644
--- a/requirements/light-threads.pip
+++ b/requirements/light-threads.pip
@@ -8,7 +8,7 @@ cffi==1.16.0
# via -r requirements/light-threads.in
dnspython==2.6.1
# via eventlet
-eventlet==0.35.1
+eventlet==0.35.2
# via -r requirements/light-threads.in
gevent==24.2.1
# via -r requirements/light-threads.in
@@ -25,7 +25,7 @@ zope-interface==6.2
# via gevent
# The following packages are considered to be unsafe in a requirements file:
-setuptools==69.1.0
+setuptools==69.1.1
# via
# zope-event
# zope-interface
diff --git a/requirements/mypy.pip b/requirements/mypy.pip
index 8263a95b6..63d230656 100644
--- a/requirements/mypy.pip
+++ b/requirements/mypy.pip
@@ -16,7 +16,7 @@ execnet==2.0.2
# via pytest-xdist
flaky==3.7.0
# via -r requirements/pytest.in
-hypothesis==6.98.9
+hypothesis==6.98.13
# via -r requirements/pytest.in
iniconfig==2.0.0
# via pytest
@@ -30,7 +30,7 @@ pluggy==1.4.0
# via pytest
pygments==2.17.2
# via -r requirements/pytest.in
-pytest==8.0.1
+pytest==8.0.2
# via
# -r requirements/pytest.in
# pytest-xdist
@@ -42,5 +42,5 @@ tomli==2.0.1
# via
# mypy
# pytest
-typing-extensions==4.9.0
+typing-extensions==4.10.0
# via mypy
diff --git a/requirements/pip-tools.pip b/requirements/pip-tools.pip
index e18ace330..ab115fa84 100644
--- a/requirements/pip-tools.pip
+++ b/requirements/pip-tools.pip
@@ -31,5 +31,5 @@ zipp==3.17.0
# The following packages are considered to be unsafe in a requirements file:
pip==24.0
# via pip-tools
-setuptools==69.1.0
+setuptools==69.1.1
# via pip-tools
diff --git a/requirements/pip.pip b/requirements/pip.pip
index 3e3a6362b..1a53df69d 100644
--- a/requirements/pip.pip
+++ b/requirements/pip.pip
@@ -10,11 +10,11 @@ filelock==3.13.1
# via virtualenv
platformdirs==4.2.0
# via virtualenv
-virtualenv==20.25.0
+virtualenv==20.25.1
# via -r requirements/pip.in
# The following packages are considered to be unsafe in a requirements file:
pip==24.0
# via -r requirements/pip.in
-setuptools==69.1.0
+setuptools==69.1.1
# via -r requirements/pip.in
diff --git a/requirements/pytest.pip b/requirements/pytest.pip
index 502c0497a..a6548e3e0 100644
--- a/requirements/pytest.pip
+++ b/requirements/pytest.pip
@@ -16,7 +16,7 @@ execnet==2.0.2
# via pytest-xdist
flaky==3.7.0
# via -r requirements/pytest.in
-hypothesis==6.98.9
+hypothesis==6.98.13
# via -r requirements/pytest.in
iniconfig==2.0.0
# via pytest
@@ -26,7 +26,7 @@ pluggy==1.4.0
# via pytest
pygments==2.17.2
# via -r requirements/pytest.in
-pytest==8.0.1
+pytest==8.0.2
# via
# -r requirements/pytest.in
# pytest-xdist
diff --git a/requirements/tox.pip b/requirements/tox.pip
index 3e1dce2c6..a731d7e61 100644
--- a/requirements/tox.pip
+++ b/requirements/tox.pip
@@ -4,7 +4,7 @@
#
# make upgrade
#
-cachetools==5.3.2
+cachetools==5.3.3
# via tox
chardet==5.2.0
# via tox
@@ -40,5 +40,5 @@ tox==4.13.0
# tox-gh
tox-gh==1.3.1
# via -r requirements/tox.in
-virtualenv==20.25.0
+virtualenv==20.25.1
# via tox
From cb4f27989d4fa4514fb4cc39af1a243b0357b4f9 Mon Sep 17 00:00:00 2001
From: Ned Batchelder
Date: Thu, 29 Feb 2024 19:30:52 -0500
Subject: [PATCH 07/24] docs: remove the deprecation warning from
Coverage.annotate
---
coverage/control.py | 6 ------
1 file changed, 6 deletions(-)
diff --git a/coverage/control.py b/coverage/control.py
index 7b790ea43..6f7f9a311 100644
--- a/coverage/control.py
+++ b/coverage/control.py
@@ -1098,12 +1098,6 @@ def annotate(
) -> None:
"""Annotate a list of modules.
- .. note::
-
- This method has been obsoleted by more modern reporting tools,
- including the :meth:`html_report` method. It will be removed in a
- future version.
-
Each module in `morfs` is annotated. The source is written to a new
file, named with a ",cover" suffix, with each line prefixed with a
marker to indicate the coverage of the line. Covered lines have ">",
From 23e0aca2b9193823f60e77487f870c55bee6bb4b Mon Sep 17 00:00:00 2001
From: Ned Batchelder
Date: Fri, 1 Mar 2024 07:37:33 -0500
Subject: [PATCH 08/24] style: use an f-string in doc/conf.py
---
doc/conf.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/doc/conf.py b/doc/conf.py
index 4e5150973..cbf63efef 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -74,7 +74,7 @@
release_date = "February 23, 2024"
# @@@ end
-rst_epilog = """
+rst_epilog = f"""
.. |release_date| replace:: {release_date}
.. |coverage-equals-release| replace:: coverage=={release}
.. |doc-url| replace:: https://coverage.readthedocs.io/en/{release}
@@ -82,7 +82,7 @@
-""".format(release=release, release_date=release_date)
+"""
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
From 5c1ff11d4fc24fd25d4b8b9d44c7d2d8be63e6ee Mon Sep 17 00:00:00 2001
From: Ned Batchelder
Date: Fri, 1 Mar 2024 08:08:17 -0500
Subject: [PATCH 09/24] fix(html): clicked line numbers now position accurately
---
CHANGES.rst | 2 +-
coverage/htmlfiles/style.css | 2 +-
coverage/htmlfiles/style.scss | 6 ++----
tests/gold/html/styled/style.css | 2 +-
4 files changed, 5 insertions(+), 7 deletions(-)
diff --git a/CHANGES.rst b/CHANGES.rst
index 3c3694456..b87ee7f53 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -23,7 +23,7 @@ upgrading your version of coverage.py.
Unreleased
----------
-Nothing yet.
+- Fix: clicking a line number in the HTML report now positions more accurately.
.. scriv-start-here
diff --git a/coverage/htmlfiles/style.css b/coverage/htmlfiles/style.css
index 2555fdfee..aec9cbef2 100644
--- a/coverage/htmlfiles/style.css
+++ b/coverage/htmlfiles/style.css
@@ -154,7 +154,7 @@ kbd { border: 1px solid black; border-color: #888 #333 #333 #888; padding: .1em
#source p .n.highlight { background: #ffdd00; }
-#source p .n a { margin-top: -4em; padding-top: 4em; text-decoration: none; color: #999; }
+#source p .n a { scroll-margin-top: 6em; text-decoration: none; color: #999; }
@media (prefers-color-scheme: dark) { #source p .n a { color: #777; } }
diff --git a/coverage/htmlfiles/style.scss b/coverage/htmlfiles/style.scss
index 5b6cf373a..7e2e0bce4 100644
--- a/coverage/htmlfiles/style.scss
+++ b/coverage/htmlfiles/style.scss
@@ -426,11 +426,9 @@ $border-indicator-width: .2em;
}
a {
- // These two lines make anchors to the line scroll the line to be
+ // Make anchors to the line scroll the line to be
// visible beneath the fixed-position header.
- margin-top: -4em;
- padding-top: 4em;
-
+ scroll-margin-top: 6em;
text-decoration: none;
color: $light-gray4;
@include color-dark($dark-gray4);
diff --git a/tests/gold/html/styled/style.css b/tests/gold/html/styled/style.css
index 2555fdfee..aec9cbef2 100644
--- a/tests/gold/html/styled/style.css
+++ b/tests/gold/html/styled/style.css
@@ -154,7 +154,7 @@ kbd { border: 1px solid black; border-color: #888 #333 #333 #888; padding: .1em
#source p .n.highlight { background: #ffdd00; }
-#source p .n a { margin-top: -4em; padding-top: 4em; text-decoration: none; color: #999; }
+#source p .n a { scroll-margin-top: 6em; text-decoration: none; color: #999; }
@media (prefers-color-scheme: dark) { #source p .n a { color: #777; } }
From 865e2b485b6d07a143d94c7016c7ac9c3f595e8e Mon Sep 17 00:00:00 2001
From: Ned Batchelder
Date: Fri, 1 Mar 2024 08:49:46 -0500
Subject: [PATCH 10/24] chore(docs): make doc_upgrade
---
doc/requirements.pip | 52 +++++++++++++++++++++++--------------------
requirements/pins.pip | 4 +---
2 files changed, 29 insertions(+), 27 deletions(-)
diff --git a/doc/requirements.pip b/doc/requirements.pip
index ec756bc80..95a7c2077 100644
--- a/doc/requirements.pip
+++ b/doc/requirements.pip
@@ -4,17 +4,17 @@
#
# make doc_upgrade
#
-alabaster==0.7.13
+alabaster==0.7.16
# via sphinx
-attrs==23.1.0
+attrs==23.2.0
# via scriv
-babel==2.12.1
+babel==2.14.0
# via sphinx
-certifi==2023.5.7
+certifi==2024.2.2
# via requests
-charset-normalizer==3.1.0
+charset-normalizer==3.3.2
# via requests
-click==8.1.3
+click==8.1.7
# via
# click-log
# scriv
@@ -24,37 +24,41 @@ cogapp==3.3.0
# via -r doc/requirements.in
colorama==0.4.6
# via sphinx-autobuild
-docutils==0.18.1
+docutils==0.20.1
# via
# sphinx
# sphinx-rtd-theme
-idna==3.4
+idna==3.6
# via requests
imagesize==1.4.1
# via sphinx
-jinja2==3.1.2
+jinja2==3.1.3
# via
# scriv
# sphinx
livereload==2.6.3
# via sphinx-autobuild
-markupsafe==2.1.3
+markdown-it-py==3.0.0
+ # via scriv
+markupsafe==2.1.5
# via jinja2
-packaging==23.1
+mdurl==0.1.2
+ # via markdown-it-py
+packaging==23.2
# via sphinx
-pygments==2.15.1
+pygments==2.17.2
# via sphinx
requests==2.31.0
# via
# scriv
# sphinx
-scriv==1.3.1
+scriv==1.5.1
# via -r doc/requirements.in
six==1.16.0
# via livereload
snowballstemmer==2.2.0
# via sphinx
-sphinx==6.2.1
+sphinx==7.2.6
# via
# -r doc/requirements.in
# sphinx-autobuild
@@ -62,29 +66,29 @@ sphinx==6.2.1
# sphinx-rtd-theme
# sphinxcontrib-jquery
# sphinxcontrib-restbuilder
-sphinx-autobuild==2021.3.14
+sphinx-autobuild==2024.2.4
# via -r doc/requirements.in
-sphinx-code-tabs==0.5.3
+sphinx-code-tabs==0.5.5
# via -r doc/requirements.in
-sphinx-rtd-theme==1.2.2
+sphinx-rtd-theme==2.0.0
# via -r doc/requirements.in
-sphinxcontrib-applehelp==1.0.4
+sphinxcontrib-applehelp==1.0.8
# via sphinx
-sphinxcontrib-devhelp==1.0.2
+sphinxcontrib-devhelp==1.0.6
# via sphinx
-sphinxcontrib-htmlhelp==2.0.1
+sphinxcontrib-htmlhelp==2.0.5
# via sphinx
sphinxcontrib-jquery==4.1
# via sphinx-rtd-theme
sphinxcontrib-jsmath==1.0.1
# via sphinx
-sphinxcontrib-qthelp==1.0.3
+sphinxcontrib-qthelp==1.0.7
# via sphinx
sphinxcontrib-restbuilder==0.3
# via -r doc/requirements.in
-sphinxcontrib-serializinghtml==1.1.5
+sphinxcontrib-serializinghtml==1.1.10
# via sphinx
-tornado==6.3.2
+tornado==6.4
# via livereload
-urllib3==2.0.3
+urllib3==2.2.1
# via requests
diff --git a/requirements/pins.pip b/requirements/pins.pip
index 97e4b2974..f27dad10c 100644
--- a/requirements/pins.pip
+++ b/requirements/pins.pip
@@ -3,6 +3,4 @@
# Version pins, for use as a constraints file.
-# sphinx-rtd-theme wants <7
-# https://github.com/readthedocs/sphinx_rtd_theme/issues/1463
-Sphinx<7
+# None for now!
From 8cf5b2b454b85025692f1e44c279379bc306a9aa Mon Sep 17 00:00:00 2001
From: Ned Batchelder
Date: Fri, 1 Mar 2024 08:53:23 -0500
Subject: [PATCH 11/24] build(docs): we can use doc8 again
---
doc/requirements.in | 2 +-
doc/requirements.pip | 14 +++++++++++++-
tox.ini | 2 +-
3 files changed, 15 insertions(+), 3 deletions(-)
diff --git a/doc/requirements.in b/doc/requirements.in
index 6d1bd330f..795928f5f 100644
--- a/doc/requirements.in
+++ b/doc/requirements.in
@@ -15,6 +15,6 @@ sphinx-code-tabs
sphinxcontrib-restbuilder
# These aren't compatible atm with other library versions:
-#doc8
+doc8
#pyenchant
#sphinxcontrib-spelling
diff --git a/doc/requirements.pip b/doc/requirements.pip
index 95a7c2077..77f9475be 100644
--- a/doc/requirements.pip
+++ b/doc/requirements.pip
@@ -24,8 +24,12 @@ cogapp==3.3.0
# via -r doc/requirements.in
colorama==0.4.6
# via sphinx-autobuild
+doc8==1.1.1
+ # via -r doc/requirements.in
docutils==0.20.1
# via
+ # doc8
+ # restructuredtext-lint
# sphinx
# sphinx-rtd-theme
idna==3.6
@@ -46,12 +50,18 @@ mdurl==0.1.2
# via markdown-it-py
packaging==23.2
# via sphinx
+pbr==6.0.0
+ # via stevedore
pygments==2.17.2
- # via sphinx
+ # via
+ # doc8
+ # sphinx
requests==2.31.0
# via
# scriv
# sphinx
+restructuredtext-lint==1.4.0
+ # via doc8
scriv==1.5.1
# via -r doc/requirements.in
six==1.16.0
@@ -88,6 +98,8 @@ sphinxcontrib-restbuilder==0.3
# via -r doc/requirements.in
sphinxcontrib-serializinghtml==1.1.10
# via sphinx
+stevedore==5.2.0
+ # via doc8
tornado==6.4
# via livereload
urllib3==2.2.1
diff --git a/tox.ini b/tox.ini
index ecd6d65fa..f236fc407 100644
--- a/tox.ini
+++ b/tox.ini
@@ -75,7 +75,7 @@ allowlist_externals =
commands =
# If this command fails, see the comment at the top of doc/cmd.rst
python -m cogapp -cP --check --verbosity=1 doc/*.rst
- ;doc8 -q --ignore-path 'doc/_*' doc CHANGES.rst README.rst
+ doc8 -q --ignore-path 'doc/_*' doc CHANGES.rst README.rst
sphinx-build -b html -aEnqW doc doc/_build/html
rst2html.py --strict README.rst doc/_build/trash
- sphinx-build -b html -b linkcheck -aEnq doc doc/_build/html
From c0cf2adb1f5e9274f584713de708b1e9a3fa13b5 Mon Sep 17 00:00:00 2001
From: Ned Batchelder
Date: Fri, 1 Mar 2024 09:40:02 -0500
Subject: [PATCH 12/24] build(docs): re-enable docs packages we couldn't
install
---
Makefile | 6 +++++-
doc/requirements.in | 8 +++-----
doc/requirements.pip | 7 +++++++
3 files changed, 15 insertions(+), 6 deletions(-)
diff --git a/Makefile b/Makefile
index 4ec3d724c..511dbc043 100644
--- a/Makefile
+++ b/Makefile
@@ -117,6 +117,9 @@ _upgrade:
doc_upgrade: export CUSTOM_COMPILE_COMMAND=make doc_upgrade
doc_upgrade: $(DOCBIN) ## Update the doc/requirements.pip file
+ @# I don't understand why, but pip-tools won't update versions in this
+ @# .pip file unless I remove it first:
+ rm doc/requirements.pip
$(DOCBIN)/pip install -q -r requirements/pip-tools.pip
$(DOCBIN)/$(PIP_COMPILE) -o doc/requirements.pip doc/requirements.in
@@ -254,7 +257,8 @@ docdev: dochtml ## Build docs, and auto-watch for changes.
PATH=$(DOCBIN):$(PATH) $(SPHINXAUTOBUILD) -b html doc doc/_build/html
docspell: $(DOCBIN) ## Run the spell checker on the docs.
- $(SPHINXBUILD) -b spelling doc doc/_spell
+ # Very mac-specific...
+ PYENCHANT_LIBRARY_PATH=/opt/homebrew/lib/libenchant-2.dylib $(SPHINXBUILD) -b spelling doc doc/_spell
##@ Publishing docs
diff --git a/doc/requirements.in b/doc/requirements.in
index 795928f5f..3b00a4082 100644
--- a/doc/requirements.in
+++ b/doc/requirements.in
@@ -7,14 +7,12 @@
-c ../requirements/pins.pip
cogapp
+doc8
+pyenchant
scriv # for writing GitHub releases
sphinx
sphinx-autobuild
sphinx_rtd_theme
sphinx-code-tabs
sphinxcontrib-restbuilder
-
-# These aren't compatible atm with other library versions:
-doc8
-#pyenchant
-#sphinxcontrib-spelling
+sphinxcontrib-spelling
diff --git a/doc/requirements.pip b/doc/requirements.pip
index 77f9475be..8114af2f7 100644
--- a/doc/requirements.pip
+++ b/doc/requirements.pip
@@ -52,6 +52,10 @@ packaging==23.2
# via sphinx
pbr==6.0.0
# via stevedore
+pyenchant==3.2.2
+ # via
+ # -r doc/requirements.in
+ # sphinxcontrib-spelling
pygments==2.17.2
# via
# doc8
@@ -76,6 +80,7 @@ sphinx==7.2.6
# sphinx-rtd-theme
# sphinxcontrib-jquery
# sphinxcontrib-restbuilder
+ # sphinxcontrib-spelling
sphinx-autobuild==2024.2.4
# via -r doc/requirements.in
sphinx-code-tabs==0.5.5
@@ -98,6 +103,8 @@ sphinxcontrib-restbuilder==0.3
# via -r doc/requirements.in
sphinxcontrib-serializinghtml==1.1.10
# via sphinx
+sphinxcontrib-spelling==8.0.0
+ # via -r doc/requirements.in
stevedore==5.2.0
# via doc8
tornado==6.4
From f466cedd41e3b866044c21fa3a0609c4658672e8 Mon Sep 17 00:00:00 2001
From: Ned Batchelder
Date: Fri, 1 Mar 2024 11:28:42 -0500
Subject: [PATCH 13/24] build: the right way to force doc_upgrade to upgrade
---
Makefile | 5 +----
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/Makefile b/Makefile
index 511dbc043..109221d53 100644
--- a/Makefile
+++ b/Makefile
@@ -117,11 +117,8 @@ _upgrade:
doc_upgrade: export CUSTOM_COMPILE_COMMAND=make doc_upgrade
doc_upgrade: $(DOCBIN) ## Update the doc/requirements.pip file
- @# I don't understand why, but pip-tools won't update versions in this
- @# .pip file unless I remove it first:
- rm doc/requirements.pip
$(DOCBIN)/pip install -q -r requirements/pip-tools.pip
- $(DOCBIN)/$(PIP_COMPILE) -o doc/requirements.pip doc/requirements.in
+ $(DOCBIN)/$(PIP_COMPILE) --upgrade -o doc/requirements.pip doc/requirements.in
diff_upgrade: ## Summarize the last `make upgrade`
@# The sort flags sort by the package name first, then by the -/+, and
From 5a031b056c6f9613b3fa4ceb1a6b73774b56195c Mon Sep 17 00:00:00 2001
From: Ned Batchelder
Date: Wed, 6 Mar 2024 19:41:31 -0500
Subject: [PATCH 14/24] fix: correct the missing branch message for the last
case of a match/case
---
CHANGES.rst | 4 ++++
coverage/parser.py | 4 +++-
tests/test_parser.py | 28 +++++++++++++++-------------
3 files changed, 22 insertions(+), 14 deletions(-)
diff --git a/CHANGES.rst b/CHANGES.rst
index b87ee7f53..eebfcc38f 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -23,6 +23,10 @@ upgrading your version of coverage.py.
Unreleased
----------
+- Fix: the last case of a match/case statement had an incorrect message if the
+ branch was missed. It said the pattern never matched, when actually the
+ branch is missed if the last case always matched.
+
- Fix: clicking a line number in the HTML report now positions more accurately.
diff --git a/coverage/parser.py b/coverage/parser.py
index 6cf73446e..5ad534701 100644
--- a/coverage/parser.py
+++ b/coverage/parser.py
@@ -1112,7 +1112,9 @@ def _handle__Match(self, node: ast.Match) -> set[ArcStart]:
exits |= self.add_body_arcs(case.body, from_start=from_start)
last_start = case_start
if not had_wildcard:
- exits.add(from_start)
+ exits.add(
+ ArcStart(case_start, cause="the pattern on line {lineno} always matched"),
+ )
return exits
def _handle__NodeList(self, node: NodeList) -> set[ArcStart]:
diff --git a/tests/test_parser.py b/tests/test_parser.py
index f17c8f2be..7f50e55b5 100644
--- a/tests/test_parser.py
+++ b/tests/test_parser.py
@@ -902,21 +902,23 @@ def test_missing_arc_descriptions_bug460(self) -> None:
assert parser.missing_arc_description(2, -3) == "line 3 didn't finish the lambda on line 3"
@pytest.mark.skipif(not env.PYBEHAVIOR.match_case, reason="Match-case is new in 3.10")
- def test_match_case_with_default(self) -> None:
- parser = self.parse_text("""\
- for command in ["huh", "go home", "go n"]:
- match command.split():
- case ["go", direction] if direction in "nesw":
- match = f"go: {direction}"
- case ["go", _]:
- match = "no go"
- print(match)
+ def test_match_case(self) -> None:
+ parser = self.parse_text("""\
+ match command.split():
+ case ["go", direction] if direction in "nesw": # 2
+ match = f"go: {direction}"
+ case ["go", _]: # 4
+ match = "no go"
+ print(match) # 6
""")
- assert parser.missing_arc_description(3, 4) == (
- "line 3 didn't jump to line 4, because the pattern on line 3 never matched"
+ assert parser.missing_arc_description(2, 3) == (
+ "line 2 didn't jump to line 3, because the pattern on line 2 never matched"
)
- assert parser.missing_arc_description(3, 5) == (
- "line 3 didn't jump to line 5, because the pattern on line 3 always matched"
+ assert parser.missing_arc_description(2, 4) == (
+ "line 2 didn't jump to line 4, because the pattern on line 2 always matched"
+ )
+ assert parser.missing_arc_description(4, 6) == (
+ "line 4 didn't jump to line 6, because the pattern on line 4 always matched"
)
From 6289be831a2d4fbbee69b4726a0ad6fd978a8b76 Mon Sep 17 00:00:00 2001
From: Ned Batchelder
Date: Wed, 6 Mar 2024 20:16:58 -0500
Subject: [PATCH 15/24] refactor: use dataclasses, no namedtuple
---
coverage/parser.py | 10 ++++++----
coverage/sysmon.py | 4 ++--
lab/benchmark/benchmark.py | 6 +++---
3 files changed, 11 insertions(+), 9 deletions(-)
diff --git a/coverage/parser.py b/coverage/parser.py
index 5ad534701..959174c36 100644
--- a/coverage/parser.py
+++ b/coverage/parser.py
@@ -13,6 +13,7 @@
import token
import tokenize
+from dataclasses import dataclass
from types import CodeType
from typing import (
cast, Any, Callable, Dict, Iterable, List, Optional, Protocol, Sequence,
@@ -462,7 +463,8 @@ def _find_statements(self) -> Iterable[TLineNo]:
# AST analysis
#
-class ArcStart(collections.namedtuple("Arc", "lineno, cause")):
+@dataclass(frozen=True, order=True)
+class ArcStart:
"""The information needed to start an arc.
`lineno` is the line number the arc starts from.
@@ -474,8 +476,8 @@ class ArcStart(collections.namedtuple("Arc", "lineno, cause")):
to have `lineno` interpolated into it.
"""
- def __new__(cls, lineno: TLineNo, cause: str | None = None) -> ArcStart:
- return super().__new__(cls, lineno, cause)
+ lineno: TLineNo
+ cause: str = ""
class TAddArcFn(Protocol):
@@ -1256,7 +1258,7 @@ def _combine_finally_starts(self, starts: set[ArcStart], exits: set[ArcStart]) -
"""
causes = []
for start in sorted(starts):
- if start.cause is not None:
+ if start.cause:
causes.append(start.cause.format(lineno=start.lineno))
cause = " or ".join(causes)
exits = {ArcStart(xit.lineno, cause) for xit in exits}
diff --git a/coverage/sysmon.py b/coverage/sysmon.py
index 5e1371a92..65c5b6e77 100644
--- a/coverage/sysmon.py
+++ b/coverage/sysmon.py
@@ -5,7 +5,6 @@
from __future__ import annotations
-import dataclasses
import functools
import inspect
import os
@@ -14,6 +13,7 @@
import threading
import traceback
+from dataclasses import dataclass
from types import CodeType, FrameType
from typing import (
Any,
@@ -151,7 +151,7 @@ def _decorator(meth: AnyCallable) -> AnyCallable:
return _decorator
-@dataclasses.dataclass
+@dataclass
class CodeInfo:
"""The information we want about each code object."""
diff --git a/lab/benchmark/benchmark.py b/lab/benchmark/benchmark.py
index d0d6188fd..1184f0b3e 100644
--- a/lab/benchmark/benchmark.py
+++ b/lab/benchmark/benchmark.py
@@ -2,7 +2,6 @@
import collections
import contextlib
-import dataclasses
import itertools
import os
import random
@@ -13,6 +12,7 @@
import time
from pathlib import Path
+from dataclasses import dataclass
from typing import Any, Dict, Iterable, Iterator, List, Optional, Tuple
import tabulate
@@ -480,7 +480,7 @@ def __init__(self, path, slug):
self.toxenv = None
-@dataclasses.dataclass
+@dataclass
class Coverage:
"""A version of coverage.py to use, maybe None."""
@@ -537,7 +537,7 @@ def __init__(self, directory, slug="source", tweaks=None, env_vars=None):
)
-@dataclasses.dataclass
+@dataclass
class Env:
"""An environment to run a test suite in."""
From 8e302218a4193ec717dd6cd11f82630b0e461067 Mon Sep 17 00:00:00 2001
From: tanaydin sirin
Date: Sun, 10 Mar 2024 13:07:27 +0100
Subject: [PATCH 16/24] fix: correct the type of report:format in config.py
(#1754)
Fixes report commands "format" config type. Which should be string, but defined as boolean
---
coverage/config.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/coverage/config.py b/coverage/config.py
index 4e1c71f27..7a7cd540e 100644
--- a/coverage/config.py
+++ b/coverage/config.py
@@ -395,7 +395,7 @@ def copy(self) -> CoverageConfig:
("exclude_list", "report:exclude_lines", "regexlist"),
("exclude_also", "report:exclude_also", "regexlist"),
("fail_under", "report:fail_under", "float"),
- ("format", "report:format", "boolean"),
+ ("format", "report:format"),
("ignore_errors", "report:ignore_errors", "boolean"),
("include_namespace_packages", "report:include_namespace_packages", "boolean"),
("partial_always_list", "report:partial_branches_always", "regexlist"),
From 3d57a072ec6073988b4c775a9cffd431ef63ab5f Mon Sep 17 00:00:00 2001
From: Ned Batchelder
Date: Sun, 10 Mar 2024 08:20:04 -0400
Subject: [PATCH 17/24] docs: document the report:format setting
---
CHANGES.rst | 6 ++++++
CONTRIBUTORS.txt | 1 +
doc/config.rst | 13 +++++++++++++
3 files changed, 20 insertions(+)
diff --git a/CHANGES.rst b/CHANGES.rst
index eebfcc38f..1ae94d2a4 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -29,6 +29,12 @@ Unreleased
- Fix: clicking a line number in the HTML report now positions more accurately.
+- Fix: the ``report:format`` setting was defined as a boolean, but should be a
+ string. Thanks, `Tanaydin Sirin <pull 1754_>`_. It is also now documented
+ on the :ref:`configuration page <config_report_format>`.
+
+.. _pull 1754: https://github.com/nedbat/coveragepy/pull/1754
+
.. scriv-start-here
diff --git a/CONTRIBUTORS.txt b/CONTRIBUTORS.txt
index ddf76e714..0c36cc46b 100644
--- a/CONTRIBUTORS.txt
+++ b/CONTRIBUTORS.txt
@@ -209,6 +209,7 @@ Steve Leonard
Steve Oswald
Steve Peak
Sviatoslav Sydorenko
+Tanaydin Sirin
Teake Nutma
Ted Wexler
Thijs Triemstra
diff --git a/doc/config.rst b/doc/config.rst
index 540ec780a..4485145c0 100644
--- a/doc/config.rst
+++ b/doc/config.rst
@@ -627,6 +627,19 @@ use of the decimal places. A setting of 100 will fail any value under 100,
regardless of the number of decimal places of precision.
+.. _config_report_format:
+
+[report] format
+...............
+
+(string, default "text") The format to use for the textual report. The default
+is "text" which produces a simple textual table. You can use "markdown" to
+produce a Markdown table, or "total" to output only the total coverage
+percentage.
+
+.. versionadded:: 7.0
+
+
.. _config_report_ignore_errors:
[report] ignore_errors
From 1ef020d7b68c4890d31228f01b4491ad4ab06244 Mon Sep 17 00:00:00 2001
From: Ned Batchelder
Date: Thu, 14 Mar 2024 11:39:50 -0400
Subject: [PATCH 18/24] build: more cheats for convenient URLs
---
igor.py | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/igor.py b/igor.py
index 6c4e9fcbf..b96bacf9c 100644
--- a/igor.py
+++ b/igor.py
@@ -454,15 +454,19 @@ def do_cheats():
+ f"[coverage {facts.ver}](https://pypi.org/project/coverage/{facts.ver}).",
)
- print("\n## To run this code:")
+ print("\n## To install this code:")
if facts.branch == "master":
print(f"python3 -m pip install git+{github}#{egg}")
else:
print(f"python3 -m pip install git+{github}@{facts.branch}#{egg}")
print(f"python3 -m pip install git+{github}@{facts.sha[:20]}#{egg}")
+ print("\n## To read this code on GitHub:")
+ print(f"https://github.com/nedbat/coveragepy/commit/{facts.sha}")
+ print(f"https://github.com/nedbat/coveragepy/commits/{facts.sha}")
+
print(
- "\n## For other collaborators:\n"
+ "\n## For other collaborators to get this code:\n"
+ f"git clone {github}\n"
+ f"cd {repo.partition('/')[-1]}\n"
+ f"git checkout {facts.sha}",
From 1b19799edfbfc65dea254e8fcaa0dea23e9709f9 Mon Sep 17 00:00:00 2001
From: Ned Batchelder
Date: Thu, 14 Mar 2024 11:33:50 -0400
Subject: [PATCH 19/24] fix: ensure absolute paths are relative when combined
#1752
If two data files are combined, one with absolute paths, and one with relative,
with relative_files=True in effect, the results depended on the order of
combining.
If the absolute files were seen first, they were added as absolute paths. If
the relative files were seen first, a mapping rule was generated that would then
remap the absolute paths when they were seen.
This fix ensures that absolute paths are remapped even if they are seen first.
---
CHANGES.rst | 8 ++++++++
coverage/data.py | 5 ++++-
coverage/files.py | 3 +++
tests/test_api.py | 20 ++++++++++++++++++++
4 files changed, 35 insertions(+), 1 deletion(-)
diff --git a/CHANGES.rst b/CHANGES.rst
index 1ae94d2a4..3e4589fd8 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -23,6 +23,13 @@ upgrading your version of coverage.py.
Unreleased
----------
+- Fix: in some cases, even with ``[run] relative_files=True``, a data file
+ could be created with absolute path names. When combined with other relative
+ data files, it was random whether the absolute file names would be made
+ relative or not. If they weren't, then a file would be listed twice in
+ reports, as detailed in `issue 1752`_. This is now fixed: absolute file
+ names are always made relative when combining.
+
- Fix: the last case of a match/case statement had an incorrect message if the
branch was missed. It said the pattern never matched, when actually the
branch is missed if the last case always matched.
@@ -33,6 +40,7 @@ Unreleased
string. Thanks, `Tanaydin Sirin <pull 1754_>`_. It is also now documented
on the :ref:`configuration page <config_report_format>`.
+.. _issue 1752: https://github.com/nedbat/coveragepy/issues/1752
.. _pull 1754: https://github.com/nedbat/coveragepy/pull/1754
diff --git a/coverage/data.py b/coverage/data.py
index 0db07d156..9513adfca 100644
--- a/coverage/data.py
+++ b/coverage/data.py
@@ -88,7 +88,10 @@ def combinable_files(data_file: str, data_paths: Iterable[str] | None = None) ->
# We never want to combine those.
files_to_combine = [fnm for fnm in files_to_combine if not fnm.endswith("-journal")]
- return files_to_combine
+ # Sorting isn't usually needed, since it shouldn't matter what order files
+ # are combined, but sorting makes tests more predictable, and makes
+ # debugging more understandable when things go wrong.
+ return sorted(files_to_combine)
def combine_parallel_data(
diff --git a/coverage/files.py b/coverage/files.py
index 71352b8eb..0dd3c4e01 100644
--- a/coverage/files.py
+++ b/coverage/files.py
@@ -489,6 +489,9 @@ def map(self, path: str, exists:Callable[[str], bool] = source_exists) -> str:
# If we get here, no pattern matched.
+ if self.relative:
+ path = relative_filename(path)
+
if self.relative and not isabs_anywhere(path):
# Auto-generate a pattern to implicitly match relative files
parts = re.split(r"[/\\]", path)
diff --git a/tests/test_api.py b/tests/test_api.py
index b6ab9cda0..9f65166b9 100644
--- a/tests/test_api.py
+++ b/tests/test_api.py
@@ -1465,3 +1465,23 @@ def test_combine_parallel_data_keep(self) -> None:
# After combining, the .coverage file & the original combined file should still be there.
self.assert_exists(".coverage")
self.assert_file_count(".coverage.*", 2)
+
+ @pytest.mark.parametrize("abs_order, rel_order", [(1, 2), (2, 1)])
+ def test_combine_absolute_then_relative_1752(self, abs_order: int, rel_order: int) -> None:
+ # https://github.com/nedbat/coveragepy/issues/1752
+ # If we're combining a relative data file and an absolute data file,
+ # the absolutes were made relative only if the relative file name was
+ # encountered first. Test combining in both orders and check that the
+ # absolute file name is properly relative in either order.
+ FILE = "sub/myprog.py"
+ self.make_file(FILE, "a = 1")
+
+ self.make_data_file(suffix=f"{abs_order}.abs", lines={abs_file(FILE): [1]})
+ self.make_data_file(suffix=f"{rel_order}.rel", lines={FILE: [1]})
+
+ self.make_file(".coveragerc", "[run]\nrelative_files = True\n")
+ cov = coverage.Coverage()
+ cov.combine()
+ data = coverage.CoverageData()
+ data.read()
+ assert {os_sep("sub/myprog.py")} == data.measured_files()
From f30818ec9295c3057b9ea47a756364dbe5ea3bf3 Mon Sep 17 00:00:00 2001
From: Ned Batchelder
Date: Thu, 14 Mar 2024 14:21:09 -0400
Subject: [PATCH 20/24] chore: make upgrade
---
requirements/dev.pip | 26 +++++++++++++-------------
requirements/kit.pip | 16 ++++++++--------
requirements/light-threads.pip | 2 +-
requirements/mypy.pip | 10 +++++-----
requirements/pip-tools.pip | 14 +++++++-------
requirements/pip.pip | 2 +-
requirements/pytest.pip | 8 ++++----
requirements/tox.pip | 4 ++--
8 files changed, 41 insertions(+), 41 deletions(-)
diff --git a/requirements/dev.pip b/requirements/dev.pip
index 0c57bf0a4..46f4161c2 100644
--- a/requirements/dev.pip
+++ b/requirements/dev.pip
@@ -8,7 +8,7 @@ astroid==3.1.0
# via pylint
attrs==23.2.0
# via hypothesis
-build==1.0.3
+build==1.1.1
# via check-manifest
cachetools==5.3.3
# via tox
@@ -20,7 +20,7 @@ charset-normalizer==3.3.2
# via requests
check-manifest==0.49
# via -r requirements/dev.in
-cogapp==3.3.0
+cogapp==3.4.1
# via -r requirements/dev.in
colorama==0.4.6
# via
@@ -43,20 +43,20 @@ filelock==3.13.1
# via
# tox
# virtualenv
-flaky==3.7.0
+flaky==3.8.1
# via -r requirements/pytest.in
greenlet==3.0.3
# via -r requirements/dev.in
-hypothesis==6.98.13
+hypothesis==6.99.6
# via -r requirements/pytest.in
idna==3.6
# via requests
-importlib-metadata==7.0.1
+importlib-metadata==7.0.2
# via
# build
# keyring
# twine
-importlib-resources==6.1.2
+importlib-resources==6.3.0
# via keyring
iniconfig==2.0.0
# via pytest
@@ -80,7 +80,7 @@ more-itertools==10.2.0
# via jaraco-classes
nh3==0.2.15
# via readme-renderer
-packaging==23.2
+packaging==24.0
# via
# build
# pudb
@@ -89,7 +89,7 @@ packaging==23.2
# tox
parso==0.8.3
# via jedi
-pkginfo==1.9.6
+pkginfo==1.10.0
# via twine
platformdirs==4.2.0
# via
@@ -114,7 +114,7 @@ pyproject-api==1.6.1
# via tox
pyproject-hooks==1.0.0
# via build
-pytest==8.0.2
+pytest==8.1.1
# via
# -r requirements/pytest.in
# pytest-xdist
@@ -150,7 +150,7 @@ tomli==2.0.1
# tox
tomlkit==0.12.4
# via pylint
-tox==4.13.0
+tox==4.14.1
# via
# -r requirements/tox.in
# tox-gh
@@ -168,7 +168,7 @@ urllib3==2.2.1
# via
# requests
# twine
-urwid==2.6.7
+urwid==2.6.9
# via
# pudb
# urwid-readline
@@ -180,7 +180,7 @@ virtualenv==20.25.1
# tox
wcwidth==0.2.13
# via urwid
-zipp==3.17.0
+zipp==3.18.0
# via
# importlib-metadata
# importlib-resources
@@ -188,7 +188,7 @@ zipp==3.17.0
# The following packages are considered to be unsafe in a requirements file:
pip==24.0
# via -r requirements/pip.in
-setuptools==69.1.1
+setuptools==69.2.0
# via
# -r requirements/pip.in
# check-manifest
diff --git a/requirements/kit.pip b/requirements/kit.pip
index d4b09e1d2..4f9c187eb 100644
--- a/requirements/kit.pip
+++ b/requirements/kit.pip
@@ -10,26 +10,26 @@ bashlex==0.18
# via cibuildwheel
bracex==2.4
# via cibuildwheel
-build==1.0.3
+build==1.1.1
# via -r requirements/kit.in
certifi==2024.2.2
# via cibuildwheel
-cibuildwheel==2.16.5
+cibuildwheel==2.17.0
# via -r requirements/kit.in
colorama==0.4.6
# via -r requirements/kit.in
filelock==3.13.1
# via cibuildwheel
-importlib-metadata==7.0.1
+importlib-metadata==7.0.2
# via build
-packaging==23.2
+packaging==24.0
# via
# auditwheel
# build
# cibuildwheel
platformdirs==4.2.0
# via cibuildwheel
-pyelftools==0.30
+pyelftools==0.31
# via auditwheel
pyproject-hooks==1.0.0
# via build
@@ -40,11 +40,11 @@ tomli==2.0.1
# pyproject-hooks
typing-extensions==4.10.0
# via cibuildwheel
-wheel==0.42.0
+wheel==0.43.0
# via -r requirements/kit.in
-zipp==3.17.0
+zipp==3.18.0
# via importlib-metadata
# The following packages are considered to be unsafe in a requirements file:
-setuptools==69.1.1
+setuptools==69.2.0
# via -r requirements/kit.in
diff --git a/requirements/light-threads.pip b/requirements/light-threads.pip
index 884b7c4ae..a2a3f92dc 100644
--- a/requirements/light-threads.pip
+++ b/requirements/light-threads.pip
@@ -25,7 +25,7 @@ zope-interface==6.2
# via gevent
# The following packages are considered to be unsafe in a requirements file:
-setuptools==69.1.1
+setuptools==69.2.0
# via
# zope-event
# zope-interface
diff --git a/requirements/mypy.pip b/requirements/mypy.pip
index 63d230656..85d794482 100644
--- a/requirements/mypy.pip
+++ b/requirements/mypy.pip
@@ -14,23 +14,23 @@ exceptiongroup==1.2.0
# pytest
execnet==2.0.2
# via pytest-xdist
-flaky==3.7.0
+flaky==3.8.1
# via -r requirements/pytest.in
-hypothesis==6.98.13
+hypothesis==6.99.6
# via -r requirements/pytest.in
iniconfig==2.0.0
# via pytest
-mypy==1.8.0
+mypy==1.9.0
# via -r requirements/mypy.in
mypy-extensions==1.0.0
# via mypy
-packaging==23.2
+packaging==24.0
# via pytest
pluggy==1.4.0
# via pytest
pygments==2.17.2
# via -r requirements/pytest.in
-pytest==8.0.2
+pytest==8.1.1
# via
# -r requirements/pytest.in
# pytest-xdist
diff --git a/requirements/pip-tools.pip b/requirements/pip-tools.pip
index ab115fa84..1b920320b 100644
--- a/requirements/pip-tools.pip
+++ b/requirements/pip-tools.pip
@@ -4,15 +4,15 @@
#
# make upgrade
#
-build==1.0.3
+build==1.1.1
# via pip-tools
click==8.1.7
# via pip-tools
-importlib-metadata==7.0.1
+importlib-metadata==7.0.2
# via build
-packaging==23.2
+packaging==24.0
# via build
-pip-tools==7.4.0
+pip-tools==7.4.1
# via -r requirements/pip-tools.in
pyproject-hooks==1.0.0
# via
@@ -23,13 +23,13 @@ tomli==2.0.1
# build
# pip-tools
# pyproject-hooks
-wheel==0.42.0
+wheel==0.43.0
# via pip-tools
-zipp==3.17.0
+zipp==3.18.0
# via importlib-metadata
# The following packages are considered to be unsafe in a requirements file:
pip==24.0
# via pip-tools
-setuptools==69.1.1
+setuptools==69.2.0
# via pip-tools
diff --git a/requirements/pip.pip b/requirements/pip.pip
index 1a53df69d..754f62a3f 100644
--- a/requirements/pip.pip
+++ b/requirements/pip.pip
@@ -16,5 +16,5 @@ virtualenv==20.25.1
# The following packages are considered to be unsafe in a requirements file:
pip==24.0
# via -r requirements/pip.in
-setuptools==69.1.1
+setuptools==69.2.0
# via -r requirements/pip.in
diff --git a/requirements/pytest.pip b/requirements/pytest.pip
index a6548e3e0..7850ef659 100644
--- a/requirements/pytest.pip
+++ b/requirements/pytest.pip
@@ -14,19 +14,19 @@ exceptiongroup==1.2.0
# pytest
execnet==2.0.2
# via pytest-xdist
-flaky==3.7.0
+flaky==3.8.1
# via -r requirements/pytest.in
-hypothesis==6.98.13
+hypothesis==6.99.6
# via -r requirements/pytest.in
iniconfig==2.0.0
# via pytest
-packaging==23.2
+packaging==24.0
# via pytest
pluggy==1.4.0
# via pytest
pygments==2.17.2
# via -r requirements/pytest.in
-pytest==8.0.2
+pytest==8.1.1
# via
# -r requirements/pytest.in
# pytest-xdist
diff --git a/requirements/tox.pip b/requirements/tox.pip
index a731d7e61..b1098c25c 100644
--- a/requirements/tox.pip
+++ b/requirements/tox.pip
@@ -18,7 +18,7 @@ filelock==3.13.1
# via
# tox
# virtualenv
-packaging==23.2
+packaging==24.0
# via
# pyproject-api
# tox
@@ -34,7 +34,7 @@ tomli==2.0.1
# via
# pyproject-api
# tox
-tox==4.13.0
+tox==4.14.1
# via
# -r requirements/tox.in
# tox-gh
From e06e4f9fbc90d4079a7559e8a2447f8722d062b8 Mon Sep 17 00:00:00 2001
From: Ned Batchelder
Date: Thu, 14 Mar 2024 14:21:40 -0400
Subject: [PATCH 21/24] chore: make doc_upgrade
---
doc/requirements.pip | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/doc/requirements.pip b/doc/requirements.pip
index 8114af2f7..78f27e095 100644
--- a/doc/requirements.pip
+++ b/doc/requirements.pip
@@ -20,7 +20,7 @@ click==8.1.7
# scriv
click-log==0.4.0
# via scriv
-cogapp==3.3.0
+cogapp==3.4.1
# via -r doc/requirements.in
colorama==0.4.6
# via sphinx-autobuild
@@ -48,7 +48,7 @@ markupsafe==2.1.5
# via jinja2
mdurl==0.1.2
# via markdown-it-py
-packaging==23.2
+packaging==24.0
# via sphinx
pbr==6.0.0
# via stevedore
From a5361616e3e377e48738473bbfdcdcc2b8a4fbeb Mon Sep 17 00:00:00 2001
From: Ned Batchelder
Date: Thu, 14 Mar 2024 14:27:25 -0400
Subject: [PATCH 22/24] docs: thanks, Bruno Rodrigues dos Santos
---
CHANGES.rst | 3 ++-
CONTRIBUTORS.txt | 1 +
2 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/CHANGES.rst b/CHANGES.rst
index 3e4589fd8..c6b098232 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -28,7 +28,8 @@ Unreleased
data files, it was random whether the absolute file names would be made
relative or not. If they weren't, then a file would be listed twice in
reports, as detailed in `issue 1752`_. This is now fixed: absolute file
- names are always made relative when combining.
+ names are always made relative when combining. Thanks to Bruno Rodrigues dos
+ Santos for support.
- Fix: the last case of a match/case statement had an incorrect message if the
branch was missed. It said the pattern never matched, when actually the
diff --git a/CONTRIBUTORS.txt b/CONTRIBUTORS.txt
index 0c36cc46b..9c063c20f 100644
--- a/CONTRIBUTORS.txt
+++ b/CONTRIBUTORS.txt
@@ -38,6 +38,7 @@ Brett Cannon
Brian Grohe
Bruno Oliveira
Bruno P. Kinoshita
+Bruno Rodrigues dos Santos
Buck Evan
Buck Golemon
Calen Pennington
From 9b0008b44d01b41d97841505e03e0a1462b45f13 Mon Sep 17 00:00:00 2001
From: Ned Batchelder
Date: Thu, 14 Mar 2024 14:39:07 -0400
Subject: [PATCH 23/24] docs: prep for 7.4.4
---
CHANGES.rst | 10 ++++++----
coverage/version.py | 4 ++--
doc/conf.py | 6 +++---
3 files changed, 11 insertions(+), 9 deletions(-)
diff --git a/CHANGES.rst b/CHANGES.rst
index c6b098232..7c7667d39 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -20,8 +20,12 @@ upgrading your version of coverage.py.
.. Version 9.8.1 — 2027-07-27
.. --------------------------
-Unreleased
-----------
+.. scriv-start-here
+
+.. _changes_7-4-4:
+
+Version 7.4.4 — 2024-03-14
+--------------------------
- Fix: in some cases, even with ``[run] relative_files=True``, a data file
could be created with absolute path names. When combined with other relative
@@ -45,8 +49,6 @@ Unreleased
.. _pull 1754: https://github.com/nedbat/coveragepy/pull/1754
-.. scriv-start-here
-
.. _changes_7-4-3:
Version 7.4.3 — 2024-02-23
diff --git a/coverage/version.py b/coverage/version.py
index 0d7f6093e..10f4115ef 100644
--- a/coverage/version.py
+++ b/coverage/version.py
@@ -8,8 +8,8 @@
# version_info: same semantics as sys.version_info.
# _dev: the .devN suffix if any.
-version_info = (7, 4, 4, "alpha", 0)
-_dev = 1
+version_info = (7, 4, 4, "final", 0)
+_dev = 0
def _make_version(
diff --git a/doc/conf.py b/doc/conf.py
index cbf63efef..5bdcca17b 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -67,11 +67,11 @@
# @@@ editable
copyright = "2009–2024, Ned Batchelder" # pylint: disable=redefined-builtin
# The short X.Y.Z version.
-version = "7.4.3"
+version = "7.4.4"
# The full version, including alpha/beta/rc tags.
-release = "7.4.3"
+release = "7.4.4"
# The date of release, in "monthname day, year" format.
-release_date = "February 23, 2024"
+release_date = "March 14, 2024"
# @@@ end
rst_epilog = f"""
From bc5e2d7453f9766a143243d9fa72b0acd75f517e Mon Sep 17 00:00:00 2001
From: Ned Batchelder
Date: Thu, 14 Mar 2024 14:39:28 -0400
Subject: [PATCH 24/24] docs: sample HTML for 7.4.4
---
.../d_7b071bdc2a35fa80___init___py.html | 10 +++++-----
.../d_7b071bdc2a35fa80___main___py.html | 8 ++++----
.../d_7b071bdc2a35fa80_cogapp_py.html | 16 ++++++++--------
.../d_7b071bdc2a35fa80_makefiles_py.html | 8 ++++----
.../d_7b071bdc2a35fa80_test_cogapp_py.html | 8 ++++----
.../d_7b071bdc2a35fa80_test_makefiles_py.html | 8 ++++----
.../d_7b071bdc2a35fa80_test_whiteutils_py.html | 8 ++++----
doc/sample_html/d_7b071bdc2a35fa80_utils_py.html | 8 ++++----
.../d_7b071bdc2a35fa80_whiteutils_py.html | 8 ++++----
doc/sample_html/index.html | 8 ++++----
doc/sample_html/status.json | 2 +-
doc/sample_html/style.css | 2 +-
12 files changed, 47 insertions(+), 47 deletions(-)
diff --git a/doc/sample_html/d_7b071bdc2a35fa80___init___py.html b/doc/sample_html/d_7b071bdc2a35fa80___init___py.html
index 8ecdb82c7..5e04da820 100644
--- a/doc/sample_html/d_7b071bdc2a35fa80___init___py.html
+++ b/doc/sample_html/d_7b071bdc2a35fa80___init___py.html
@@ -66,8 +66,8 @@