diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 7ebcb5d52..b985e23eb 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -21,3 +21,9 @@ d8daa08b347fe6b7099c437b09d926eb999d0803 # 2023-12-02 style: check_coverage close parens should be on their own line 5d0b5d4464b84adb6389c8894c207a323edb2b2b + +# 2024-02-27 style: fix COM812 Trailing comma missing +e4e238a9ed8f2ad2b9060247591b4c057c2953bf + +# 2024-02-27 style: modernize type hints, a few more f-strings +401a63bf08bdfd780b662f64d2dfe3603f2584dd diff --git a/CHANGES.rst b/CHANGES.rst index 6deb2074d..7c7667d39 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -22,6 +22,33 @@ upgrading your version of coverage.py. .. scriv-start-here +.. _changes_7-4-4: + +Version 7.4.4 — 2024-03-14 +-------------------------- + +- Fix: in some cases, even with ``[run] relative_files=True``, a data file + could be created with absolute path names. When combined with other relative + data files, it was random whether the absolute file names would be made + relative or not. If they weren't, then a file would be listed twice in + reports, as detailed in `issue 1752`_. This is now fixed: absolute file + names are always made relative when combining. Thanks to Bruno Rodrigues dos + Santos for support. + +- Fix: the last case of a match/case statement had an incorrect message if the + branch was missed. It said the pattern never matched, when actually the + branch is missed if the last case always matched. + +- Fix: clicking a line number in the HTML report now positions more accurately. + +- Fix: the ``report:format`` setting was defined as a boolean, but should be a + string. Thanks, `Tanaydin Sirin <pull 1754_>`_. It is also now documented + on the :ref:`configuration page <config_report_format>`. + +.. _issue 1752: https://github.com/nedbat/coveragepy/issues/1752 +.. _pull 1754: https://github.com/nedbat/coveragepy/pull/1754 + + .. 
_changes_7-4-3: Version 7.4.3 — 2024-02-23 diff --git a/CONTRIBUTORS.txt b/CONTRIBUTORS.txt index ddf76e714..9c063c20f 100644 --- a/CONTRIBUTORS.txt +++ b/CONTRIBUTORS.txt @@ -38,6 +38,7 @@ Brett Cannon Brian Grohe Bruno Oliveira Bruno P. Kinoshita +Bruno Rodrigues dos Santos Buck Evan Buck Golemon Calen Pennington @@ -209,6 +210,7 @@ Steve Leonard Steve Oswald Steve Peak Sviatoslav Sydorenko +Tanaydin Sirin Teake Nutma Ted Wexler Thijs Triemstra diff --git a/Makefile b/Makefile index 4ec3d724c..109221d53 100644 --- a/Makefile +++ b/Makefile @@ -118,7 +118,7 @@ _upgrade: doc_upgrade: export CUSTOM_COMPILE_COMMAND=make doc_upgrade doc_upgrade: $(DOCBIN) ## Update the doc/requirements.pip file $(DOCBIN)/pip install -q -r requirements/pip-tools.pip - $(DOCBIN)/$(PIP_COMPILE) -o doc/requirements.pip doc/requirements.in + $(DOCBIN)/$(PIP_COMPILE) --upgrade -o doc/requirements.pip doc/requirements.in diff_upgrade: ## Summarize the last `make upgrade` @# The sort flags sort by the package name first, then by the -/+, and @@ -254,7 +254,8 @@ docdev: dochtml ## Build docs, and auto-watch for changes. PATH=$(DOCBIN):$(PATH) $(SPHINXAUTOBUILD) -b html doc doc/_build/html docspell: $(DOCBIN) ## Run the spell checker on the docs. - $(SPHINXBUILD) -b spelling doc doc/_spell + # Very mac-specific... 
+ PYENCHANT_LIBRARY_PATH=/opt/homebrew/lib/libenchant-2.dylib $(SPHINXBUILD) -b spelling doc doc/_spell ##@ Publishing docs diff --git a/ci/download_gha_artifacts.py b/ci/download_gha_artifacts.py index d91add0c3..e656b6d2e 100644 --- a/ci/download_gha_artifacts.py +++ b/ci/download_gha_artifacts.py @@ -99,7 +99,7 @@ def main(owner_repo, artifact_pattern, dest_dir): print( f"Downloading {artifact['name']}, " + f"size: {artifact['size_in_bytes']}, " - + f"created: {utc2local(artifact['created_at'])}" + + f"created: {utc2local(artifact['created_at'])}", ) download_url(artifact["archive_download_url"], temp_zip) unpack_zipfile(temp_zip) diff --git a/coverage/__init__.py b/coverage/__init__.py index d22d09deb..c3403d444 100644 --- a/coverage/__init__.py +++ b/coverage/__init__.py @@ -39,4 +39,3 @@ # On Windows, we encode and decode deep enough that something goes wrong and # the encodings.utf_8 module is loaded and then unloaded, I don't know why. # Adding a reference here prevents it from being unloaded. Yuk. -import encodings.utf_8 # pylint: disable=wrong-import-position, wrong-import-order diff --git a/coverage/annotate.py b/coverage/annotate.py index 2ef89c967..46a82a81d 100644 --- a/coverage/annotate.py +++ b/coverage/annotate.py @@ -8,7 +8,7 @@ import os import re -from typing import Iterable, Optional, TYPE_CHECKING +from typing import Iterable, TYPE_CHECKING from coverage.files import flat_rootname from coverage.misc import ensure_dir, isolate_module @@ -48,12 +48,12 @@ class AnnotateReporter: def __init__(self, coverage: Coverage) -> None: self.coverage = coverage self.config = self.coverage.config - self.directory: Optional[str] = None + self.directory: str | None = None blank_re = re.compile(r"\s*(#|$)") else_re = re.compile(r"\s*else\s*:\s*(#|$)") - def report(self, morfs: Optional[Iterable[TMorf]], directory: Optional[str] = None) -> None: + def report(self, morfs: Iterable[TMorf] | None, directory: str | None = None) -> None: """Run the report. 
See `coverage.report()` for arguments. diff --git a/coverage/cmdline.py b/coverage/cmdline.py index 5379c7c5f..463ea8fde 100644 --- a/coverage/cmdline.py +++ b/coverage/cmdline.py @@ -14,7 +14,7 @@ import textwrap import traceback -from typing import cast, Any, List, NoReturn, Optional, Tuple +from typing import cast, Any, NoReturn import coverage from coverage import Coverage @@ -281,7 +281,7 @@ class OptionParserError(Exception): """Used to stop the optparse error handler ending the process.""" pass - def parse_args_ok(self, args: List[str]) -> Tuple[bool, Optional[optparse.Values], List[str]]: + def parse_args_ok(self, args: list[str]) -> tuple[bool, optparse.Values | None, list[str]]: """Call optparse.parse_args, but return a triple: (ok, options, args) @@ -317,9 +317,9 @@ class CmdOptionParser(CoverageOptionParser): def __init__( self, action: str, - options: List[optparse.Option], + options: list[optparse.Option], description: str, - usage: Optional[str] = None, + usage: str | None = None, ): """Create an OptionParser for a coverage.py command. @@ -419,7 +419,7 @@ def get_prog_name(self) -> str: "erase": CmdOptionParser( "erase", [ - Opts.datafile + Opts.datafile, ] + GLOBAL_ARGS, description="Erase previously collected coverage data.", ), @@ -549,9 +549,9 @@ def get_prog_name(self) -> str: def show_help( - error: Optional[str] = None, - topic: Optional[str] = None, - parser: Optional[optparse.OptionParser] = None, + error: str | None = None, + topic: str | None = None, + parser: optparse.OptionParser | None = None, ) -> None: """Display an error message, or the named topic.""" assert error or topic or parser @@ -605,7 +605,7 @@ def __init__(self) -> None: self.global_option = False self.coverage: Coverage - def command_line(self, argv: List[str]) -> int: + def command_line(self, argv: list[str]) -> int: """The bulk of the command line interface to coverage.py. `argv` is the argument list to process. 
@@ -620,7 +620,7 @@ def command_line(self, argv: List[str]) -> int: # The command syntax we parse depends on the first argument. Global # switch syntax always starts with an option. - parser: Optional[optparse.OptionParser] + parser: optparse.OptionParser | None self.global_option = argv[0].startswith("-") if self.global_option: parser = GlobalOptionParser() @@ -712,7 +712,7 @@ def command_line(self, argv: List[str]) -> int: skip_empty=options.skip_empty, sort=options.sort, output_format=options.format, - **report_args + **report_args, ) elif options.action == "annotate": self.coverage.annotate(directory=options.directory, **report_args) @@ -724,25 +724,25 @@ def command_line(self, argv: List[str]) -> int: skip_empty=options.skip_empty, show_contexts=options.show_contexts, title=options.title, - **report_args + **report_args, ) elif options.action == "xml": total = self.coverage.xml_report( outfile=options.outfile, skip_empty=options.skip_empty, - **report_args + **report_args, ) elif options.action == "json": total = self.coverage.json_report( outfile=options.outfile, pretty_print=options.pretty_print, show_contexts=options.show_contexts, - **report_args + **report_args, ) elif options.action == "lcov": total = self.coverage.lcov_report( outfile=options.outfile, - **report_args + **report_args, ) else: # There are no other possible actions. @@ -772,7 +772,7 @@ def command_line(self, argv: List[str]) -> int: def do_help( self, options: optparse.Values, - args: List[str], + args: list[str], parser: optparse.OptionParser, ) -> bool: """Deal with help requests. 
@@ -807,7 +807,7 @@ def do_help( return False - def do_run(self, options: optparse.Values, args: List[str]) -> int: + def do_run(self, options: optparse.Values, args: list[str]) -> int: """Implementation of 'coverage run'.""" if not args: @@ -839,7 +839,7 @@ def do_run(self, options: optparse.Values, args: List[str]) -> int: show_help( "Options affecting multiprocessing must only be specified " + "in a configuration file.\n" + - f"Remove --{opt_name} from the command line." + f"Remove --{opt_name} from the command line.", ) return ERR @@ -866,7 +866,7 @@ def do_run(self, options: optparse.Values, args: List[str]) -> int: return OK - def do_debug(self, args: List[str]) -> int: + def do_debug(self, args: list[str]) -> int: """Implementation of 'coverage debug'.""" if not args: @@ -899,7 +899,7 @@ def do_debug(self, args: List[str]) -> int: return OK -def unshell_list(s: str) -> Optional[List[str]]: +def unshell_list(s: str) -> list[str] | None: """Turn a command-line argument into a list.""" if not s: return None @@ -913,7 +913,7 @@ def unshell_list(s: str) -> Optional[List[str]]: return s.split(",") -def unglob_args(args: List[str]) -> List[str]: +def unglob_args(args: list[str]) -> list[str]: """Interpret shell wildcards for platforms that need it.""" if env.WINDOWS: globbed = [] @@ -958,7 +958,7 @@ def unglob_args(args: List[str]) -> List[str]: } -def main(argv: Optional[List[str]] = None) -> Optional[int]: +def main(argv: list[str] | None = None) -> int | None: """The main entry point to coverage.py. This is installed as the script entry point. 
@@ -997,8 +997,8 @@ def main(argv: Optional[List[str]] = None) -> Optional[int]: original_main = main def main( # pylint: disable=function-redefined - argv: Optional[List[str]] = None, - ) -> Optional[int]: + argv: list[str] | None = None, + ) -> int | None: """A wrapper around main that profiles.""" profiler = SimpleLauncher.launch() try: diff --git a/coverage/collector.py b/coverage/collector.py index dcb8a30dd..9bd380c2e 100644 --- a/coverage/collector.py +++ b/coverage/collector.py @@ -11,7 +11,7 @@ from types import FrameType from typing import ( - cast, Any, Callable, Dict, List, Mapping, Optional, Set, Type, TypeVar, + cast, Any, Callable, Dict, List, Mapping, Set, TypeVar, ) from coverage import env @@ -70,7 +70,7 @@ class Collector: # The stack of active Collectors. Collectors are added here when started, # and popped when stopped. Collectors on the stack are paused when not # the top, and resumed when they become the top again. - _collectors: List[Collector] = [] + _collectors: list[Collector] = [] # The concurrency settings we support here. LIGHT_THREADS = {"greenlet", "eventlet", "gevent"} @@ -79,12 +79,12 @@ def __init__( self, should_trace: Callable[[str, FrameType], TFileDisposition], check_include: Callable[[str, FrameType], bool], - should_start_context: Optional[Callable[[FrameType], Optional[str]]], + should_start_context: Callable[[FrameType], str | None] | None, file_mapper: Callable[[str], str], timid: bool, branch: bool, warn: TWarnFn, - concurrency: List[str], + concurrency: list[str], metacov: bool, ) -> None: """Create a collector. 
@@ -136,16 +136,16 @@ def __init__( self.covdata: CoverageData self.threading = None - self.static_context: Optional[str] = None + self.static_context: str | None = None self.origin = short_stack() self.concur_id_func = None - self._trace_class: Type[TracerCore] - self.file_disposition_class: Type[TFileDisposition] + self._trace_class: type[TracerCore] + self.file_disposition_class: type[TFileDisposition] - core: Optional[str] + core: str | None if timid: core = "pytrace" else: @@ -225,7 +225,7 @@ def __init__( raise ConfigError( "Can't support concurrency={} with {}, only threads are supported.".format( tried, self.tracer_name(), - ) + ), ) if do_threading or not concurrencies: @@ -240,7 +240,7 @@ def __init__( def __repr__(self) -> str: return f"<Collector at {id(self):#x}: {self.tracer_name()}>" - def use_data(self, covdata: CoverageData, context: Optional[str]) -> None: + def use_data(self, covdata: CoverageData, context: str | None) -> None: """Use `covdata` for recording data.""" self.covdata = covdata self.static_context = context @@ -268,9 +268,9 @@ def reset(self) -> None: # A dictionary mapping file names to file tracer plugin names that will # handle them. - self.file_tracers: Dict[str, str] = {} + self.file_tracers: dict[str, str] = {} - self.disabled_plugins: Set[str] = set() + self.disabled_plugins: set[str] = set() # The .should_trace_cache attribute is a cache from file names to # coverage.FileDisposition objects, or None. When a file is first @@ -301,7 +301,7 @@ def reset(self) -> None: self.should_trace_cache = {} # Our active Tracers.
- self.tracers: List[TracerCore] = [] + self.tracers: list[TracerCore] = [] self._clear_data() @@ -342,12 +342,12 @@ def _start_tracer(self) -> TTraceFn | None: # # New in 3.12: threading.settrace_all_threads: https://github.com/python/cpython/pull/96681 - def _installation_trace(self, frame: FrameType, event: str, arg: Any) -> Optional[TTraceFn]: + def _installation_trace(self, frame: FrameType, event: str, arg: Any) -> TTraceFn | None: """Called on new threads, installs the real tracer.""" # Remove ourselves as the trace function. sys.settrace(None) # Install the real tracer. - fn: Optional[TTraceFn] = self._start_tracer() + fn: TTraceFn | None = self._start_tracer() # Invoke the real trace function with the current event, to be sure # not to lose an event. if fn: @@ -444,9 +444,9 @@ def _activity(self) -> bool: """ return any(tracer.activity() for tracer in self.tracers) - def switch_context(self, new_context: Optional[str]) -> None: + def switch_context(self, new_context: str | None) -> None: """Switch to a new dynamic context.""" - context: Optional[str] + context: str | None self.flush_data() if self.static_context: context = self.static_context @@ -471,7 +471,7 @@ def cached_mapped_file(self, filename: str) -> str: """A locally cached version of file names mapped through file_mapper.""" return self.file_mapper(filename) - def mapped_file_dict(self, d: Mapping[str, T]) -> Dict[str, T]: + def mapped_file_dict(self, d: Mapping[str, T]) -> dict[str, T]: """Return a dict like d, but with keys modified by file_mapper.""" # The call to list(items()) ensures that the GIL protects the dictionary # iterator against concurrent modifications by tracers running @@ -511,7 +511,7 @@ def flush_data(self) -> bool: # Unpack the line number pairs packed into integers. See # tracer.c:CTracer_record_pair for the C code that creates # these packed ints. 
- arc_data: Dict[str, List[TArc]] = {} + arc_data: dict[str, list[TArc]] = {} packed_data = cast(Dict[str, Set[int]], self.data) # The list() here and in the inner loop are to get a clean copy diff --git a/coverage/config.py b/coverage/config.py index 24d5642b2..7a7cd540e 100644 --- a/coverage/config.py +++ b/coverage/config.py @@ -13,7 +13,7 @@ import re from typing import ( - Any, Callable, Dict, Iterable, List, Optional, Tuple, Union, + Any, Callable, Iterable, Union, ) from coverage.exceptions import ConfigError @@ -46,12 +46,12 @@ def __init__(self, our_file: bool) -> None: def read( # type: ignore[override] self, filenames: Iterable[str], - encoding_unused: Optional[str] = None, - ) -> List[str]: + encoding_unused: str | None = None, + ) -> list[str]: """Read a file name as UTF-8 configuration data.""" return super().read(filenames, encoding="utf-8") - def real_section(self, section: str) -> Optional[str]: + def real_section(self, section: str) -> str | None: """Get the actual name of a section.""" for section_prefix in self.section_prefixes: real_section = section_prefix + section @@ -69,7 +69,7 @@ def has_option(self, section: str, option: str) -> bool: def has_section(self, section: str) -> bool: return bool(self.real_section(section)) - def options(self, section: str) -> List[str]: + def options(self, section: str) -> list[str]: real_section = self.real_section(section) if real_section is not None: return super().options(real_section) @@ -77,7 +77,7 @@ def options(self, section: str) -> List[str]: def get_section(self, section: str) -> TConfigSectionOut: """Get the contents of a section, as a dictionary.""" - d: Dict[str, TConfigValueOut] = {} + d: dict[str, TConfigValueOut] = {} for opt in self.options(section): d[opt] = self.get(section, opt) return d @@ -103,7 +103,7 @@ def get(self, section: str, option: str, *args: Any, **kwargs: Any) -> str: # ty v = substitute_variables(v, os.environ) return v - def getlist(self, section: str, option: str) -> 
List[str]: + def getlist(self, section: str, option: str) -> list[str]: """Read a list of strings. The value of `section` and `option` is treated as a comma- and newline- @@ -121,7 +121,7 @@ def getlist(self, section: str, option: str) -> List[str]: values.append(value) return values - def getregexlist(self, section: str, option: str) -> List[str]: + def getregexlist(self, section: str, option: str) -> list[str]: """Read a list of full-line regexes. The value of `section` and `option` is treated as a newline-separated @@ -138,7 +138,7 @@ def getregexlist(self, section: str, option: str) -> List[str]: re.compile(value) except re.error as e: raise ConfigError( - f"Invalid [{section}].{option} value {value!r}: {e}" + f"Invalid [{section}].{option} value {value!r}: {e}", ) from e if value: value_list.append(value) @@ -180,12 +180,12 @@ def __init__(self) -> None: """Initialize the configuration attributes to their defaults.""" # Metadata about the config. # We tried to read these config files. - self.attempted_config_files: List[str] = [] + self.attempted_config_files: list[str] = [] # We did read these config files, but maybe didn't find any content for us. - self.config_files_read: List[str] = [] + self.config_files_read: list[str] = [] # The file that gave us our configuration. 
- self.config_file: Optional[str] = None - self._config_contents: Optional[bytes] = None + self.config_file: str | None = None + self._config_contents: bytes | None = None # Defaults for [run] and [report] self._include = None @@ -193,49 +193,49 @@ def __init__(self) -> None: # Defaults for [run] self.branch = False - self.command_line: Optional[str] = None - self.concurrency: List[str] = [] - self.context: Optional[str] = None + self.command_line: str | None = None + self.concurrency: list[str] = [] + self.context: str | None = None self.cover_pylib = False self.data_file = ".coverage" - self.debug: List[str] = [] - self.debug_file: Optional[str] = None - self.disable_warnings: List[str] = [] - self.dynamic_context: Optional[str] = None + self.debug: list[str] = [] + self.debug_file: str | None = None + self.disable_warnings: list[str] = [] + self.dynamic_context: str | None = None self.parallel = False - self.plugins: List[str] = [] + self.plugins: list[str] = [] self.relative_files = False - self.run_include: List[str] = [] - self.run_omit: List[str] = [] + self.run_include: list[str] = [] + self.run_omit: list[str] = [] self.sigterm = False - self.source: Optional[List[str]] = None - self.source_pkgs: List[str] = [] + self.source: list[str] | None = None + self.source_pkgs: list[str] = [] self.timid = False - self._crash: Optional[str] = None + self._crash: str | None = None # Defaults for [report] self.exclude_list = DEFAULT_EXCLUDE[:] - self.exclude_also: List[str] = [] + self.exclude_also: list[str] = [] self.fail_under = 0.0 - self.format: Optional[str] = None + self.format: str | None = None self.ignore_errors = False self.include_namespace_packages = False - self.report_include: Optional[List[str]] = None - self.report_omit: Optional[List[str]] = None + self.report_include: list[str] | None = None + self.report_omit: list[str] | None = None self.partial_always_list = DEFAULT_PARTIAL_ALWAYS[:] self.partial_list = DEFAULT_PARTIAL[:] self.precision = 0 - 
self.report_contexts: Optional[List[str]] = None + self.report_contexts: list[str] | None = None self.show_missing = False self.skip_covered = False self.skip_empty = False - self.sort: Optional[str] = None + self.sort: str | None = None # Defaults for [html] - self.extra_css: Optional[str] = None + self.extra_css: str | None = None self.html_dir = "htmlcov" - self.html_skip_covered: Optional[bool] = None - self.html_skip_empty: Optional[bool] = None + self.html_skip_covered: bool | None = None + self.html_skip_empty: bool | None = None self.html_title = "Coverage report" self.show_contexts = False @@ -252,10 +252,10 @@ def __init__(self) -> None: self.lcov_output = "coverage.lcov" # Defaults for [paths] - self.paths: Dict[str, List[str]] = {} + self.paths: dict[str, list[str]] = {} # Options for plugins - self.plugin_options: Dict[str, TConfigSectionOut] = {} + self.plugin_options: dict[str, TConfigSectionOut] = {} MUST_BE_LIST = { "debug", "concurrency", "plugins", @@ -323,8 +323,8 @@ def from_file(self, filename: str, warn: Callable[[str], None], our_file: bool) for unknown in set(cp.options(section)) - options: warn( "Unrecognized option '[{}] {}=' in config file {}".format( - real_section, unknown, filename - ) + real_section, unknown, filename, + ), ) # [paths] is special @@ -395,7 +395,7 @@ def copy(self) -> CoverageConfig: ("exclude_list", "report:exclude_lines", "regexlist"), ("exclude_also", "report:exclude_also", "regexlist"), ("fail_under", "report:fail_under", "float"), - ("format", "report:format", "boolean"), + ("format", "report:format"), ("ignore_errors", "report:ignore_errors", "boolean"), ("include_namespace_packages", "report:include_namespace_packages", "boolean"), ("partial_always_list", "report:partial_branches_always", "regexlist"), @@ -453,7 +453,7 @@ def get_plugin_options(self, plugin: str) -> TConfigSectionOut: """Get a dictionary of options for the plugin named `plugin`.""" return self.plugin_options.get(plugin, {}) - def 
set_option(self, option_name: str, value: Union[TConfigValueIn, TConfigSectionIn]) -> None: + def set_option(self, option_name: str, value: TConfigValueIn | TConfigSectionIn) -> None: """Set an option in the configuration. `option_name` is a colon-separated string indicating the section and @@ -484,7 +484,7 @@ def set_option(self, option_name: str, value: Union[TConfigValueIn, TConfigSecti # If we get here, we didn't find the option. raise ConfigError(f"No such option: {option_name!r}") - def get_option(self, option_name: str) -> Optional[TConfigValueOut]: + def get_option(self, option_name: str) -> TConfigValueOut | None: """Get an option from the configuration. `option_name` is a colon-separated string indicating the section and @@ -527,14 +527,14 @@ def post_process(self) -> None: } self.exclude_list += self.exclude_also - def debug_info(self) -> List[Tuple[str, Any]]: + def debug_info(self) -> list[tuple[str, Any]]: """Make a list of (name, value) pairs for writing debug info.""" return human_sorted_items( (k, v) for k, v in self.__dict__.items() if not k.startswith("_") ) -def config_files_to_try(config_file: Union[bool, str]) -> List[Tuple[str, bool, bool]]: +def config_files_to_try(config_file: bool | str) -> list[tuple[str, bool, bool]]: """What config files should we try to read? 
Returns a list of tuples: @@ -566,7 +566,7 @@ def config_files_to_try(config_file: Union[bool, str]) -> List[Tuple[str, bool, def read_coverage_config( - config_file: Union[bool, str], + config_file: bool | str, warn: Callable[[str], None], **kwargs: TConfigValueIn, ) -> CoverageConfig: diff --git a/coverage/context.py b/coverage/context.py index 20a5c92d0..c8ee71271 100644 --- a/coverage/context.py +++ b/coverage/context.py @@ -6,12 +6,12 @@ from __future__ import annotations from types import FrameType -from typing import cast, Callable, Optional, Sequence +from typing import cast, Callable, Sequence def combine_context_switchers( - context_switchers: Sequence[Callable[[FrameType], Optional[str]]], -) -> Optional[Callable[[FrameType], Optional[str]]]: + context_switchers: Sequence[Callable[[FrameType], str | None]], +) -> Callable[[FrameType], str | None] | None: """Create a single context switcher from multiple switchers. `context_switchers` is a list of functions that take a frame as an @@ -30,7 +30,7 @@ def combine_context_switchers( if len(context_switchers) == 1: return context_switchers[0] - def should_start_context(frame: FrameType) -> Optional[str]: + def should_start_context(frame: FrameType) -> str | None: """The combiner for multiple context switchers.""" for switcher in context_switchers: new_context = switcher(frame) @@ -41,7 +41,7 @@ def should_start_context(frame: FrameType) -> Optional[str]: return should_start_context -def should_start_context_test_function(frame: FrameType) -> Optional[str]: +def should_start_context_test_function(frame: FrameType) -> str | None: """Is this frame calling a test_* function?""" co_name = frame.f_code.co_name if co_name.startswith("test") or co_name == "runTest": @@ -49,7 +49,7 @@ def should_start_context_test_function(frame: FrameType) -> Optional[str]: return None -def qualname_from_frame(frame: FrameType) -> Optional[str]: +def qualname_from_frame(frame: FrameType) -> str | None: """Get a qualified name for the 
code running in `frame`.""" co = frame.f_code fname = co.co_name diff --git a/coverage/control.py b/coverage/control.py index d33ef769a..6f7f9a311 100644 --- a/coverage/control.py +++ b/coverage/control.py @@ -20,7 +20,7 @@ from types import FrameType from typing import ( cast, - Any, Callable, Dict, IO, Iterable, Iterator, List, Optional, Tuple, Union, + Any, Callable, IO, Iterable, Iterator, List, ) from coverage import env @@ -30,7 +30,7 @@ from coverage.context import should_start_context_test_function, combine_context_switchers from coverage.data import CoverageData, combine_parallel_data from coverage.debug import ( - DebugControl, NoDebugging, short_stack, write_formatted_info, relevant_environment_display + DebugControl, NoDebugging, short_stack, write_formatted_info, relevant_environment_display, ) from coverage.disposition import disposition_debug_msg from coverage.exceptions import ConfigError, CoverageException, CoverageWarning, PluginError @@ -104,10 +104,10 @@ class Coverage(TConfigurable): """ # The stack of started Coverage instances. - _instances: List[Coverage] = [] + _instances: list[Coverage] = [] @classmethod - def current(cls) -> Optional[Coverage]: + def current(cls) -> Coverage | None: """Get the latest started `Coverage` instance, if any. Returns: a `Coverage` instance, or None. 
@@ -122,21 +122,21 @@ def current(cls) -> Optional[Coverage]: def __init__( # pylint: disable=too-many-arguments self, - data_file: Optional[Union[FilePath, DefaultValue]] = DEFAULT_DATAFILE, - data_suffix: Optional[Union[str, bool]] = None, - cover_pylib: Optional[bool] = None, + data_file: FilePath | DefaultValue | None = DEFAULT_DATAFILE, + data_suffix: str | bool | None = None, + cover_pylib: bool | None = None, auto_data: bool = False, - timid: Optional[bool] = None, - branch: Optional[bool] = None, - config_file: Union[FilePath, bool] = True, - source: Optional[Iterable[str]] = None, - source_pkgs: Optional[Iterable[str]] = None, - omit: Optional[Union[str, Iterable[str]]] = None, - include: Optional[Union[str, Iterable[str]]] = None, - debug: Optional[Iterable[str]] = None, - concurrency: Optional[Union[str, Iterable[str]]] = None, + timid: bool | None = None, + branch: bool | None = None, + config_file: FilePath | bool = True, + source: Iterable[str] | None = None, + source_pkgs: Iterable[str] | None = None, + omit: str | Iterable[str] | None = None, + include: str | Iterable[str] | None = None, + debug: Iterable[str] | None = None, + concurrency: str | Iterable[str] | None = None, check_preimported: bool = False, - context: Optional[str] = None, + context: str | None = None, messages: bool = False, ) -> None: """ @@ -240,7 +240,7 @@ def __init__( # pylint: disable=too-many-arguments data_file = os.fspath(data_file) # This is injectable by tests. - self._debug_file: Optional[IO[str]] = None + self._debug_file: IO[str] | None = None self._auto_load = self._auto_save = auto_data self._data_suffix_specified = data_suffix @@ -249,25 +249,25 @@ def __init__( # pylint: disable=too-many-arguments self._warn_no_data = True self._warn_unimported_source = True self._warn_preimported_source = check_preimported - self._no_warn_slugs: List[str] = [] + self._no_warn_slugs: list[str] = [] self._messages = messages # A record of all the warnings that have been issued. 
- self._warnings: List[str] = [] + self._warnings: list[str] = [] # Other instance attributes, set with placebos or placeholders. # More useful objects will be created later. self._debug: DebugControl = NoDebugging() - self._inorout: Optional[InOrOut] = None + self._inorout: InOrOut | None = None self._plugins: Plugins = Plugins() - self._data: Optional[CoverageData] = None - self._collector: Optional[Collector] = None + self._data: CoverageData | None = None + self._collector: Collector | None = None self._metacov = False self._file_mapper: Callable[[str], str] = abs_file self._data_suffix = self._run_suffix = None - self._exclude_re: Dict[str, str] = {} - self._old_sigterm: Optional[Callable[[int, Optional[FrameType]], Any]] = None + self._exclude_re: dict[str, str] = {} + self._old_sigterm: Callable[[int, FrameType | None], Any] | None = None # State machine variables: # Have we initialized everything? @@ -413,7 +413,7 @@ def _check_include_omit_etc(self, filename: str, frame: FrameType) -> bool: return not reason - def _warn(self, msg: str, slug: Optional[str] = None, once: bool = False) -> None: + def _warn(self, msg: str, slug: str | None = None, once: bool = False) -> None: """Use `msg` as a warning. For warning suppression, use `slug` as the shorthand. @@ -445,7 +445,7 @@ def _message(self, msg: str) -> None: if self._messages: print(msg) - def get_option(self, option_name: str) -> Optional[TConfigValueOut]: + def get_option(self, option_name: str) -> TConfigValueOut | None: """Get an option from the configuration. `option_name` is a colon-separated string indicating the section and @@ -463,7 +463,7 @@ def get_option(self, option_name: str) -> Optional[TConfigValueOut]: """ return self.config.get_option(option_name) - def set_option(self, option_name: str, value: Union[TConfigValueIn, TConfigSectionIn]) -> None: + def set_option(self, option_name: str, value: TConfigValueIn | TConfigSectionIn) -> None: """Set an option in the configuration. 
`option_name` is a colon-separated string indicating the section and @@ -511,7 +511,7 @@ def load(self) -> None: def _init_for_start(self) -> None: """Initialization for start()""" # Construct the collector. - concurrency: List[str] = self.config.concurrency or [] + concurrency: list[str] = self.config.concurrency or [] if "multiprocessing" in concurrency: if self.config.config_file is None: raise ConfigError("multiprocessing requires a configuration file") @@ -570,7 +570,7 @@ def _init_for_start(self) -> None: for plugin in self._plugins.file_tracers ), self._collector.tracer_name(), - ) + ), ) for plugin in self._plugins.file_tracers: plugin._coverage_enabled = False @@ -600,7 +600,7 @@ def _init_for_start(self) -> None: signal.SIGTERM, self._on_sigterm, ) - def _init_data(self, suffix: Optional[Union[str, bool]]) -> None: + def _init_data(self, suffix: str | bool | None) -> None: """Create a data file if we don't have one yet.""" if self._data is None: # Create the data file. We do this at construction time so that the @@ -685,7 +685,7 @@ def _atexit(self, event: str = "atexit") -> None: if self._auto_save or event == "sigterm": self.save() - def _on_sigterm(self, signum_unused: int, frame_unused: Optional[FrameType]) -> None: + def _on_sigterm(self, signum_unused: int, frame_unused: FrameType | None) -> None: """A handler for signal.SIGTERM.""" self._atexit("sigterm") # Statements after here won't be seen by metacov because we just wrote @@ -769,7 +769,7 @@ def _exclude_regex(self, which: str) -> str: self._exclude_re[which] = join_regex(excl_list) return self._exclude_re[which] - def get_exclude_list(self, which: str = "exclude") -> List[str]: + def get_exclude_list(self, which: str = "exclude") -> list[str]: """Return a list of excluded regex strings. `which` indicates which list is desired. 
See :meth:`exclude` for the @@ -798,9 +798,9 @@ def _make_aliases(self) -> PathAliases: def combine( self, - data_paths: Optional[Iterable[str]] = None, + data_paths: Iterable[str] | None = None, strict: bool = False, - keep: bool = False + keep: bool = False, ) -> None: """Combine together a number of similarly-named coverage data files. @@ -896,7 +896,7 @@ def _post_save_work(self) -> None: self._data.touch_files(paths, plugin_name) # Backward compatibility with version 1. - def analysis(self, morf: TMorf) -> Tuple[str, List[TLineNo], List[TLineNo], str]: + def analysis(self, morf: TMorf) -> tuple[str, list[TLineNo], list[TLineNo], str]: """Like `analysis2` but doesn't return excluded line numbers.""" f, s, _, m, mf = self.analysis2(morf) return f, s, m, mf @@ -904,7 +904,7 @@ def analysis(self, morf: TMorf) -> Tuple[str, List[TLineNo], List[TLineNo], str] def analysis2( self, morf: TMorf, - ) -> Tuple[str, List[TLineNo], List[TLineNo], List[TLineNo], str]: + ) -> tuple[str, list[TLineNo], list[TLineNo], list[TLineNo], str]: """Analyze a module. `morf` is a module or a file name. It will be analyzed to determine @@ -930,7 +930,7 @@ def analysis2( analysis.missing_formatted(), ) - def _analyze(self, it: Union[FileReporter, TMorf]) -> Analysis: + def _analyze(self, it: FileReporter | TMorf) -> Analysis: """Analyze a single morf or code unit. Returns an `Analysis` object. 
@@ -952,7 +952,7 @@ def _get_file_reporter(self, morf: TMorf) -> FileReporter: """Get a FileReporter for a module or file name.""" assert self._data is not None plugin = None - file_reporter: Union[str, FileReporter] = "python" + file_reporter: str | FileReporter = "python" if isinstance(morf, str): mapped_morf = self._file_mapper(morf) @@ -965,8 +965,8 @@ def _get_file_reporter(self, morf: TMorf) -> FileReporter: if file_reporter is None: raise PluginError( "Plugin {!r} did not provide a file reporter for {!r}.".format( - plugin._coverage_plugin_name, morf - ) + plugin._coverage_plugin_name, morf, + ), ) if file_reporter == "python": @@ -975,7 +975,7 @@ def _get_file_reporter(self, morf: TMorf) -> FileReporter: assert isinstance(file_reporter, FileReporter) return file_reporter - def _get_file_reporters(self, morfs: Optional[Iterable[TMorf]] = None) -> List[FileReporter]: + def _get_file_reporters(self, morfs: Iterable[TMorf] | None = None) -> list[FileReporter]: """Get a list of FileReporters for a list of modules or file names. For each module or file name in `morfs`, find a FileReporter. 
Return @@ -1007,18 +1007,18 @@ def _prepare_data_for_reporting(self) -> None: def report( self, - morfs: Optional[Iterable[TMorf]] = None, - show_missing: Optional[bool] = None, - ignore_errors: Optional[bool] = None, - file: Optional[IO[str]] = None, - omit: Optional[Union[str, List[str]]] = None, - include: Optional[Union[str, List[str]]] = None, - skip_covered: Optional[bool] = None, - contexts: Optional[List[str]] = None, - skip_empty: Optional[bool] = None, - precision: Optional[int] = None, - sort: Optional[str] = None, - output_format: Optional[str] = None, + morfs: Iterable[TMorf] | None = None, + show_missing: bool | None = None, + ignore_errors: bool | None = None, + file: IO[str] | None = None, + omit: str | list[str] | None = None, + include: str | list[str] | None = None, + skip_covered: bool | None = None, + contexts: list[str] | None = None, + skip_empty: bool | None = None, + precision: int | None = None, + sort: str | None = None, + output_format: str | None = None, ) -> float: """Write a textual summary report to `file`. @@ -1089,21 +1089,15 @@ def report( def annotate( self, - morfs: Optional[Iterable[TMorf]] = None, - directory: Optional[str] = None, - ignore_errors: Optional[bool] = None, - omit: Optional[Union[str, List[str]]] = None, - include: Optional[Union[str, List[str]]] = None, - contexts: Optional[List[str]] = None, + morfs: Iterable[TMorf] | None = None, + directory: str | None = None, + ignore_errors: bool | None = None, + omit: str | list[str] | None = None, + include: str | list[str] | None = None, + contexts: list[str] | None = None, ) -> None: """Annotate a list of modules. - .. note:: - - This method has been obsoleted by more modern reporting tools, - including the :meth:`html_report` method. It will be removed in a - future version. - Each module in `morfs` is annotated. The source is written to a new file, named with a ",cover" suffix, with each line prefixed with a marker to indicate the coverage of the line. 
Covered lines have ">", @@ -1125,18 +1119,18 @@ def annotate( def html_report( self, - morfs: Optional[Iterable[TMorf]] = None, - directory: Optional[str] = None, - ignore_errors: Optional[bool] = None, - omit: Optional[Union[str, List[str]]] = None, - include: Optional[Union[str, List[str]]] = None, - extra_css: Optional[str] = None, - title: Optional[str] = None, - skip_covered: Optional[bool] = None, - show_contexts: Optional[bool] = None, - contexts: Optional[List[str]] = None, - skip_empty: Optional[bool] = None, - precision: Optional[int] = None, + morfs: Iterable[TMorf] | None = None, + directory: str | None = None, + ignore_errors: bool | None = None, + omit: str | list[str] | None = None, + include: str | list[str] | None = None, + extra_css: str | None = None, + title: str | None = None, + skip_covered: bool | None = None, + show_contexts: bool | None = None, + contexts: list[str] | None = None, + skip_empty: bool | None = None, + precision: int | None = None, ) -> float: """Generate an HTML report. @@ -1183,13 +1177,13 @@ def html_report( def xml_report( self, - morfs: Optional[Iterable[TMorf]] = None, - outfile: Optional[str] = None, - ignore_errors: Optional[bool] = None, - omit: Optional[Union[str, List[str]]] = None, - include: Optional[Union[str, List[str]]] = None, - contexts: Optional[List[str]] = None, - skip_empty: Optional[bool] = None, + morfs: Iterable[TMorf] | None = None, + outfile: str | None = None, + ignore_errors: bool | None = None, + omit: str | list[str] | None = None, + include: str | list[str] | None = None, + contexts: list[str] | None = None, + skip_empty: bool | None = None, ) -> float: """Generate an XML report of coverage results. 
@@ -1217,14 +1211,14 @@ def xml_report( def json_report( self, - morfs: Optional[Iterable[TMorf]] = None, - outfile: Optional[str] = None, - ignore_errors: Optional[bool] = None, - omit: Optional[Union[str, List[str]]] = None, - include: Optional[Union[str, List[str]]] = None, - contexts: Optional[List[str]] = None, - pretty_print: Optional[bool] = None, - show_contexts: Optional[bool] = None, + morfs: Iterable[TMorf] | None = None, + outfile: str | None = None, + ignore_errors: bool | None = None, + omit: str | list[str] | None = None, + include: str | list[str] | None = None, + contexts: list[str] | None = None, + pretty_print: bool | None = None, + show_contexts: bool | None = None, ) -> float: """Generate a JSON report of coverage results. @@ -1255,12 +1249,12 @@ def json_report( def lcov_report( self, - morfs: Optional[Iterable[TMorf]] = None, - outfile: Optional[str] = None, - ignore_errors: Optional[bool] = None, - omit: Optional[Union[str, List[str]]] = None, - include: Optional[Union[str, List[str]]] = None, - contexts: Optional[List[str]] = None, + morfs: Iterable[TMorf] | None = None, + outfile: str | None = None, + ignore_errors: bool | None = None, + omit: str | list[str] | None = None, + include: str | list[str] | None = None, + contexts: list[str] | None = None, ) -> float: """Generate an LCOV report of coverage results. 
@@ -1282,7 +1276,7 @@ def lcov_report( ): return render_report(self.config.lcov_output, LcovReporter(self), morfs, self._message) - def sys_info(self) -> Iterable[Tuple[str, Any]]: + def sys_info(self) -> Iterable[tuple[str, Any]]: """Return a list of (key, value) pairs showing internal information.""" import coverage as covmod @@ -1290,7 +1284,7 @@ def sys_info(self) -> Iterable[Tuple[str, Any]]: self._init() self._post_init() - def plugin_info(plugins: List[Any]) -> List[str]: + def plugin_info(plugins: list[Any]) -> list[str]: """Make an entry for the sys_info from a list of plug-ins.""" entries = [] for plugin in plugins: @@ -1312,7 +1306,7 @@ def plugin_info(plugins: List[Any]) -> List[str]: ("configs_read", self.config.config_files_read), ("config_file", self.config.config_file), ("config_contents", - repr(self.config._config_contents) if self.config._config_contents else "-none-" + repr(self.config._config_contents) if self.config._config_contents else "-none-", ), ("data_file", self._data.data_filename() if self._data is not None else "-none-"), ("python", sys.version.replace("\n", "")), @@ -1343,11 +1337,11 @@ def plugin_info(plugins: List[Any]) -> List[str]: Coverage = decorate_methods( # type: ignore[misc] show_calls(show_args=True), - butnot=["get_data"] + butnot=["get_data"], )(Coverage) -def process_startup() -> Optional[Coverage]: +def process_startup() -> Coverage | None: """Call this at Python start-up to perhaps measure coverage. 
If the environment variable COVERAGE_PROCESS_START is defined, coverage diff --git a/coverage/data.py b/coverage/data.py index 0868173b6..9513adfca 100644 --- a/coverage/data.py +++ b/coverage/data.py @@ -16,7 +16,7 @@ import hashlib import os.path -from typing import Callable, Dict, Iterable, List, Optional +from typing import Callable, Iterable from coverage.exceptions import CoverageException, NoDataError from coverage.files import PathAliases @@ -24,7 +24,7 @@ from coverage.sqldata import CoverageData -def line_counts(data: CoverageData, fullpath: bool = False) -> Dict[str, int]: +def line_counts(data: CoverageData, fullpath: bool = False) -> dict[str, int]: """Return a dict summarizing the line coverage data. Keys are based on the file names, and values are the number of executed @@ -63,7 +63,7 @@ def add_data_to_hash(data: CoverageData, filename: str, hasher: Hasher) -> None: hasher.update(data.file_tracer(filename)) -def combinable_files(data_file: str, data_paths: Optional[Iterable[str]] = None) -> List[str]: +def combinable_files(data_file: str, data_paths: Iterable[str] | None = None) -> list[str]: """Make a list of data files to be combined. `data_file` is a path to a data file. `data_paths` is a list of files or @@ -88,16 +88,19 @@ def combinable_files(data_file: str, data_paths: Optional[Iterable[str]] = None) # We never want to combine those. files_to_combine = [fnm for fnm in files_to_combine if not fnm.endswith("-journal")] - return files_to_combine + # Sorting isn't usually needed, since it shouldn't matter what order files + # are combined, but sorting makes tests more predictable, and makes + # debugging more understandable when things go wrong. 
+ return sorted(files_to_combine) def combine_parallel_data( data: CoverageData, - aliases: Optional[PathAliases] = None, - data_paths: Optional[Iterable[str]] = None, + aliases: PathAliases | None = None, + data_paths: Iterable[str] | None = None, strict: bool = False, keep: bool = False, - message: Optional[Callable[[str], None]] = None, + message: Callable[[str], None] | None = None, ) -> None: """Combine a number of data files together. @@ -212,7 +215,7 @@ def debug_data_file(filename: str) -> None: print(line) -def sorted_lines(data: CoverageData, filename: str) -> List[int]: +def sorted_lines(data: CoverageData, filename: str) -> list[int]: """Get the sorted lines for a file, for tests.""" lines = data.lines(filename) return sorted(lines or []) diff --git a/coverage/debug.py b/coverage/debug.py index 8aaecb589..e4bed8b42 100644 --- a/coverage/debug.py +++ b/coverage/debug.py @@ -21,7 +21,7 @@ from typing import ( overload, - Any, Callable, IO, Iterable, Iterator, Mapping, Optional, List, Tuple, + Any, Callable, IO, Iterable, Iterator, Mapping, ) from coverage.misc import human_sorted_items, isolate_module @@ -33,7 +33,7 @@ # When debugging, it can be helpful to force some options, especially when # debugging the configuration mechanisms you usually use to control debugging! # This is a list of forced debugging options. 
-FORCED_DEBUG: List[str] = [] +FORCED_DEBUG: list[str] = [] FORCED_DEBUG_FILE = None @@ -45,8 +45,8 @@ class DebugControl: def __init__( self, options: Iterable[str], - output: Optional[IO[str]], - file_name: Optional[str] = None, + output: IO[str] | None, + file_name: str | None = None, ) -> None: """Configure the options and output file for debugging.""" self.options = list(options) + FORCED_DEBUG @@ -87,7 +87,7 @@ def without_callers(self) -> Iterator[None]: finally: self.suppress_callers = old - def write(self, msg: str, *, exc: Optional[BaseException] = None) -> None: + def write(self, msg: str, *, exc: BaseException | None = None) -> None: """Write a line of debug output. `msg` is the line to write. A newline will be appended. @@ -118,7 +118,7 @@ def should(self, option: str) -> bool: """Should we write debug messages? Never.""" return False - def write(self, msg: str, *, exc: Optional[BaseException] = None) -> None: + def write(self, msg: str, *, exc: BaseException | None = None) -> None: """This will never be called.""" raise AssertionError("NoDebugging.write should never be called.") @@ -128,7 +128,7 @@ def info_header(label: str) -> str: return "--{:-<60s}".format(" "+label+" ") -def info_formatter(info: Iterable[Tuple[str, Any]]) -> Iterator[str]: +def info_formatter(info: Iterable[tuple[str, Any]]) -> Iterator[str]: """Produce a sequence of formatted lines from info. `info` is a sequence of pairs (label, data). The produced lines are @@ -158,7 +158,7 @@ def info_formatter(info: Iterable[Tuple[str, Any]]) -> Iterator[str]: def write_formatted_info( write: Callable[[str], None], header: str, - info: Iterable[Tuple[str, Any]], + info: Iterable[tuple[str, Any]], ) -> None: """Write a sequence of (label,data) pairs nicely. 
@@ -179,10 +179,10 @@ def exc_one_line(exc: Exception) -> str: return "|".join(l.rstrip() for l in lines) -_FILENAME_REGEXES: List[Tuple[str, str]] = [ +_FILENAME_REGEXES: list[tuple[str, str]] = [ (r".*[/\\]pytest-of-.*[/\\]pytest-\d+([/\\]popen-gw\d+)?", "tmp:"), ] -_FILENAME_SUBS: List[Tuple[str, str]] = [] +_FILENAME_SUBS: list[tuple[str, str]] = [] @overload def short_filename(filename: str) -> str: @@ -192,7 +192,7 @@ def short_filename(filename: str) -> str: def short_filename(filename: None) -> None: pass -def short_filename(filename: Optional[str]) -> Optional[str]: +def short_filename(filename: str | None) -> str | None: """Shorten a file name. Directories are replaced by prefixes like 'syspath:'""" if not _FILENAME_SUBS: for pathdir in sys.path: @@ -247,7 +247,7 @@ def short_stack( for pat in BORING_PRELUDE: stack = itertools.dropwhile( (lambda fi, pat=pat: re.search(pat, fi.filename)), # type: ignore[misc] - stack + stack, ) lines = [] for frame_info in stack: @@ -350,7 +350,7 @@ def filter_text(text: str, filters: Iterable[Callable[[str], str]]) -> str: class CwdTracker: """A class to add cwd info to debug messages.""" def __init__(self) -> None: - self.cwd: Optional[str] = None + self.cwd: str | None = None def filter(self, text: str) -> str: """Add a cwd message for each new cwd.""" @@ -393,7 +393,7 @@ def filter(self, text: str) -> str: class PytestTracker: """Track the current pytest test name to add to debug messages.""" def __init__(self) -> None: - self.test_name: Optional[str] = None + self.test_name: str | None = None def filter(self, text: str) -> str: """Add a message when the pytest test changes.""" @@ -408,7 +408,7 @@ class DebugOutputFile: """A file-like object that includes pid and cwd information.""" def __init__( self, - outfile: Optional[IO[str]], + outfile: IO[str] | None, filters: Iterable[Callable[[str], str]], ): self.outfile = outfile @@ -418,8 +418,8 @@ def __init__( @classmethod def get_one( cls, - fileobj: Optional[IO[str]] = 
None, - file_name: Optional[str] = None, + fileobj: IO[str] | None = None, + file_name: str | None = None, filters: Iterable[Callable[[str], str]] = (), interim: bool = False, ) -> DebugOutputFile: @@ -478,7 +478,7 @@ def _set_singleton_data(cls, the_one: DebugOutputFile, interim: bool) -> None: sys.modules[cls.SYS_MOD_NAME] = singleton_module @classmethod - def _get_singleton_data(cls) -> Tuple[Optional[DebugOutputFile], bool]: + def _get_singleton_data(cls) -> tuple[DebugOutputFile | None, bool]: """Get the one DebugOutputFile.""" singleton_module = sys.modules.get(cls.SYS_MOD_NAME) return getattr(singleton_module, cls.SINGLETON_ATTR, (None, True)) @@ -582,7 +582,7 @@ def _wrapper(self: Any, *args: Any, **kwargs: Any) -> Any: return _decorator -def relevant_environment_display(env: Mapping[str, str]) -> List[Tuple[str, str]]: +def relevant_environment_display(env: Mapping[str, str]) -> list[tuple[str, str]]: """Filter environment variables for a debug display. Select variables to display (with COV or PY in the name, or HOME, TEMP, or diff --git a/coverage/disposition.py b/coverage/disposition.py index 3cc6c8d68..7aa15e97a 100644 --- a/coverage/disposition.py +++ b/coverage/disposition.py @@ -5,7 +5,7 @@ from __future__ import annotations -from typing import Optional, Type, TYPE_CHECKING +from typing import TYPE_CHECKING from coverage.types import TFileDisposition @@ -18,10 +18,10 @@ class FileDisposition: original_filename: str canonical_filename: str - source_filename: Optional[str] + source_filename: str | None trace: bool reason: str - file_tracer: Optional[FileTracer] + file_tracer: FileTracer | None has_dynamic_filename: bool def __repr__(self) -> str: @@ -32,7 +32,7 @@ def __repr__(self) -> str: # be implemented in either C or Python. Acting on them is done with these # functions. 
-def disposition_init(cls: Type[TFileDisposition], original_filename: str) -> TFileDisposition: +def disposition_init(cls: type[TFileDisposition], original_filename: str) -> TFileDisposition: """Construct and initialize a new FileDisposition object.""" disp = cls() disp.original_filename = original_filename diff --git a/coverage/env.py b/coverage/env.py index b6b9caca3..063f9d15b 100644 --- a/coverage/env.py +++ b/coverage/env.py @@ -9,7 +9,7 @@ import platform import sys -from typing import Any, Iterable, Tuple +from typing import Any, Iterable # debug_info() at the bottom wants to show all the globals, but not imports. # Grab the global names here to know which names to not show. Nothing defined @@ -134,7 +134,7 @@ class PYBEHAVIOR: TESTING = os.getenv("COVERAGE_TESTING") == "True" -def debug_info() -> Iterable[Tuple[str, Any]]: +def debug_info() -> Iterable[tuple[str, Any]]: """Return a list of (name, value) pairs for printing debug information.""" info = [ (name, value) for name, value in globals().items() diff --git a/coverage/execfile.py b/coverage/execfile.py index aac4d30bb..7011c70f9 100644 --- a/coverage/execfile.py +++ b/coverage/execfile.py @@ -15,7 +15,7 @@ from importlib.machinery import ModuleSpec from types import CodeType, ModuleType -from typing import Any, List, Optional, Tuple +from typing import Any from coverage import env from coverage.exceptions import CoverageException, _ExceptionDuringRun, NoCode, NoSource @@ -39,7 +39,7 @@ def __init__(self, fullname: str, *_args: Any) -> None: def find_module( modulename: str, -) -> Tuple[Optional[str], str, ModuleSpec]: +) -> tuple[str | None, str, ModuleSpec]: """Find the module named `modulename`. 
Returns the file path of the module, the name of the enclosing @@ -59,7 +59,7 @@ def find_module( if not spec: raise NoSource( f"No module named {mod_main}; " + - f"{modulename!r} is a package and cannot be directly executed" + f"{modulename!r} is a package and cannot be directly executed", ) pathname = spec.origin packagename = spec.name @@ -73,23 +73,23 @@ class PyRunner: This is meant to emulate real Python execution as closely as possible. """ - def __init__(self, args: List[str], as_module: bool = False) -> None: + def __init__(self, args: list[str], as_module: bool = False) -> None: self.args = args self.as_module = as_module self.arg0 = args[0] - self.package: Optional[str] = None - self.modulename: Optional[str] = None - self.pathname: Optional[str] = None - self.loader: Optional[DummyLoader] = None - self.spec: Optional[ModuleSpec] = None + self.package: str | None = None + self.modulename: str | None = None + self.pathname: str | None = None + self.loader: DummyLoader | None = None + self.spec: ModuleSpec | None = None def prepare(self) -> None: """Set sys.path properly. This needs to happen before any importing, and without importing anything. """ - path0: Optional[str] + path0: str | None if self.as_module: path0 = os.getcwd() elif os.path.isdir(self.arg0): @@ -257,7 +257,7 @@ def run(self) -> None: os.chdir(cwd) -def run_python_module(args: List[str]) -> None: +def run_python_module(args: list[str]) -> None: """Run a Python module, as though with ``python -m name args...``. `args` is the argument array to present as sys.argv, including the first @@ -271,7 +271,7 @@ def run_python_module(args: List[str]) -> None: runner.run() -def run_python_file(args: List[str]) -> None: +def run_python_file(args: list[str]) -> None: """Run a Python file as if it were the main program on the command line. 
`args` is the argument array to present as sys.argv, including the first diff --git a/coverage/files.py b/coverage/files.py index 0b276d16a..0dd3c4e01 100644 --- a/coverage/files.py +++ b/coverage/files.py @@ -13,7 +13,7 @@ import re import sys -from typing import Callable, Dict, Iterable, List, Optional, Tuple +from typing import Callable, Iterable from coverage import env from coverage.exceptions import ConfigError @@ -24,7 +24,7 @@ RELATIVE_DIR: str = "" -CANONICAL_FILENAME_CACHE: Dict[str, str] = {} +CANONICAL_FILENAME_CACHE: dict[str, str] = {} def set_relative_directory() -> None: """Set the directory that `relative_filename` will be relative to.""" @@ -110,8 +110,8 @@ def flat_rootname(filename: str) -> str: if env.WINDOWS: - _ACTUAL_PATH_CACHE: Dict[str, str] = {} - _ACTUAL_PATH_LIST_CACHE: Dict[str, List[str]] = {} + _ACTUAL_PATH_CACHE: dict[str, str] = {} + _ACTUAL_PATH_LIST_CACHE: dict[str, list[str]] = {} def actual_path(path: str) -> str: """Get the actual path of `path`, including the correct case.""" @@ -156,7 +156,7 @@ def abs_file(path: str) -> str: return actual_path(os.path.abspath(os.path.realpath(path))) -def zip_location(filename: str) -> Optional[Tuple[str, str]]: +def zip_location(filename: str) -> tuple[str, str] | None: """Split a filename into a zipfile / inner name pair. Only return a pair if the zipfile exists. No check is made if the inner @@ -197,7 +197,7 @@ def isabs_anywhere(filename: str) -> bool: return ntpath.isabs(filename) or posixpath.isabs(filename) -def prep_patterns(patterns: Iterable[str]) -> List[str]: +def prep_patterns(patterns: Iterable[str]) -> list[str]: """Prepare the file patterns for use in a `GlobMatcher`. 
If a pattern starts with a wildcard, it is used as a pattern @@ -224,7 +224,7 @@ class TreeMatcher: """ def __init__(self, paths: Iterable[str], name: str = "unknown") -> None: - self.original_paths: List[str] = human_sorted(paths) + self.original_paths: list[str] = human_sorted(paths) #self.paths = list(map(os.path.normcase, paths)) self.paths = [os.path.normcase(p) for p in paths] self.name = name @@ -232,7 +232,7 @@ def __init__(self, paths: Iterable[str], name: str = "unknown") -> None: def __repr__(self) -> str: return f"" - def info(self) -> List[str]: + def info(self) -> list[str]: """A list of strings for displaying when dumping state.""" return self.original_paths @@ -259,7 +259,7 @@ def __init__(self, module_names: Iterable[str], name:str = "unknown") -> None: def __repr__(self) -> str: return f"" - def info(self) -> List[str]: + def info(self) -> list[str]: """A list of strings for displaying when dumping state.""" return self.modules @@ -289,7 +289,7 @@ def __init__(self, pats: Iterable[str], name: str = "unknown") -> None: def __repr__(self) -> str: return f"" - def info(self) -> List[str]: + def info(self) -> list[str]: """A list of strings for displaying when dumping state.""" return self.pats @@ -389,11 +389,11 @@ class PathAliases: """ def __init__( self, - debugfn: Optional[Callable[[str], None]] = None, + debugfn: Callable[[str], None] | None = None, relative: bool = False, ) -> None: # A list of (original_pattern, regex, result) - self.aliases: List[Tuple[str, re.Pattern[str], str]] = [] + self.aliases: list[tuple[str, re.Pattern[str], str]] = [] self.debugfn = debugfn or (lambda msg: 0) self.relative = relative self.pprinted = False @@ -478,17 +478,20 @@ def map(self, path: str, exists:Callable[[str], bool] = source_exists) -> str: if not exists(new): self.debugfn( f"Rule {original_pattern!r} changed {path!r} to {new!r} " + - "which doesn't exist, continuing" + "which doesn't exist, continuing", ) continue self.debugfn( f"Matched path {path!r} 
to rule {original_pattern!r} -> {result!r}, " + - f"producing {new!r}" + f"producing {new!r}", ) return new # If we get here, no pattern matched. + if self.relative: + path = relative_filename(path) + if self.relative and not isabs_anywhere(path): # Auto-generate a pattern to implicitly match relative files parts = re.split(r"[/\\]", path) @@ -500,7 +503,7 @@ def map(self, path: str, exists:Callable[[str], bool] = source_exists) -> str: # Only add a new pattern if we don't already have this pattern. if not any(p == pattern for p, _, _ in self.aliases): self.debugfn( - f"Generating rule: {pattern!r} -> {result!r} using regex {regex_pat!r}" + f"Generating rule: {pattern!r} -> {result!r} using regex {regex_pat!r}", ) self.aliases.append((pattern, re.compile(regex_pat), result)) return self.map(path, exists=exists) diff --git a/coverage/html.py b/coverage/html.py index 5a571dac0..e2bae1d6b 100644 --- a/coverage/html.py +++ b/coverage/html.py @@ -15,7 +15,7 @@ import string from dataclasses import dataclass -from typing import Any, Dict, Iterable, List, Optional, Tuple, TYPE_CHECKING, cast +from typing import Any, Iterable, TYPE_CHECKING, cast import coverage from coverage.data import CoverageData, add_data_to_hash @@ -77,19 +77,19 @@ def write_html(fname: str, html: str) -> None: @dataclass class LineData: """The data for each source line of HTML output.""" - tokens: List[Tuple[str, str]] + tokens: list[tuple[str, str]] number: TLineNo category: str statement: bool - contexts: List[str] + contexts: list[str] contexts_label: str - context_list: List[str] - short_annotations: List[str] - long_annotations: List[str] + context_list: list[str] + short_annotations: list[str] + long_annotations: list[str] html: str = "" - context_str: Optional[str] = None - annotate: Optional[str] = None - annotate_long: Optional[str] = None + context_str: str | None = None + annotate: str | None = None + annotate_long: str | None = None css_class: str = "" @@ -98,7 +98,7 @@ class FileData: 
"""The data for each source file of HTML output.""" relative_filename: str nums: Numbers - lines: List[LineData] + lines: list[LineData] class HtmlDataGeneration: @@ -233,7 +233,7 @@ def __init__(self, cov: Coverage) -> None: title = self.config.html_title - self.extra_css: Optional[str] + self.extra_css: str | None if self.config.extra_css: self.extra_css = os.path.basename(self.config.extra_css) else: @@ -242,8 +242,8 @@ def __init__(self, cov: Coverage) -> None: self.data = self.coverage.get_data() self.has_arcs = self.data.has_arcs() - self.file_summaries: List[IndexInfoDict] = [] - self.all_files_nums: List[Numbers] = [] + self.file_summaries: list[IndexInfoDict] = [] + self.all_files_nums: list[Numbers] = [] self.incr = IncrementalChecker(self.directory) self.datagen = HtmlDataGeneration(self.coverage) self.totals = Numbers(precision=self.config.precision) @@ -278,7 +278,7 @@ def __init__(self, cov: Coverage) -> None: self.pyfile_html_source = read_data("pyfile.html") self.source_tmpl = Templite(self.pyfile_html_source, self.template_globals) - def report(self, morfs: Optional[Iterable[TMorf]]) -> float: + def report(self, morfs: Iterable[TMorf] | None) -> float: """Generate an HTML report for `morfs`. `morfs` is a list of modules or file names. @@ -447,7 +447,7 @@ def write_html_file(self, ftr: FileToReport, prev_html: str, next_html: str) -> css_classes = [] if ldata.category: css_classes.append( - self.template_globals["category"][ldata.category] # type: ignore[index] + self.template_globals["category"][ldata.category], # type: ignore[index] ) ldata.css_class = " ".join(css_classes) or "pln" @@ -546,7 +546,7 @@ def __init__(self, directory: str) -> None: def reset(self) -> None: """Initialize to empty. 
Causes all files to be reported.""" self.globals = "" - self.files: Dict[str, FileInfoDict] = {} + self.files: dict[str, FileInfoDict] = {} def read(self) -> None: """Read the information we stored last time.""" @@ -651,6 +651,6 @@ def escape(t: str) -> str: return t.replace("&", "&").replace("<", "<") -def pair(ratio: Tuple[int, int]) -> str: +def pair(ratio: tuple[int, int]) -> str: """Format a pair of numbers so JavaScript can read them in an attribute.""" - return "%s %s" % ratio + return "{} {}".format(*ratio) diff --git a/coverage/htmlfiles/style.css b/coverage/htmlfiles/style.css index 2555fdfee..aec9cbef2 100644 --- a/coverage/htmlfiles/style.css +++ b/coverage/htmlfiles/style.css @@ -154,7 +154,7 @@ kbd { border: 1px solid black; border-color: #888 #333 #333 #888; padding: .1em #source p .n.highlight { background: #ffdd00; } -#source p .n a { margin-top: -4em; padding-top: 4em; text-decoration: none; color: #999; } +#source p .n a { scroll-margin-top: 6em; text-decoration: none; color: #999; } @media (prefers-color-scheme: dark) { #source p .n a { color: #777; } } diff --git a/coverage/htmlfiles/style.scss b/coverage/htmlfiles/style.scss index 5b6cf373a..7e2e0bce4 100644 --- a/coverage/htmlfiles/style.scss +++ b/coverage/htmlfiles/style.scss @@ -426,11 +426,9 @@ $border-indicator-width: .2em; } a { - // These two lines make anchors to the line scroll the line to be + // Make anchors to the line scroll the line to be // visible beneath the fixed-position header. 
- margin-top: -4em; - padding-top: 4em; - + scroll-margin-top: 6em; text-decoration: none; color: $light-gray4; @include color-dark($dark-gray4); diff --git a/coverage/inorout.py b/coverage/inorout.py index 2e3746249..5ea29edf1 100644 --- a/coverage/inorout.py +++ b/coverage/inorout.py @@ -17,7 +17,7 @@ from types import FrameType, ModuleType from typing import ( - cast, Any, Iterable, List, Optional, Set, Tuple, Type, TYPE_CHECKING, + cast, Any, Iterable, TYPE_CHECKING, ) from coverage import env @@ -38,7 +38,7 @@ # when deciding where the stdlib is. These modules are not used for anything, # they are modules importable from the pypy lib directories, so that we can # find those directories. -modules_we_happen_to_have: List[ModuleType] = [ +modules_we_happen_to_have: list[ModuleType] = [ inspect, itertools, os, platform, re, sysconfig, traceback, ] @@ -70,7 +70,7 @@ def canonical_path(morf: TMorf, directory: bool = False) -> str: return morf_path -def name_for_module(filename: str, frame: Optional[FrameType]) -> str: +def name_for_module(filename: str, frame: FrameType | None) -> str: """Get the name of the module for a filename and frame. For configurability's sake, we allow __main__ modules to be matched by @@ -117,7 +117,7 @@ def module_has_file(mod: ModuleType) -> bool: return os.path.exists(mod__file__) -def file_and_path_for_module(modulename: str) -> Tuple[Optional[str], List[str]]: +def file_and_path_for_module(modulename: str) -> tuple[str | None, list[str]]: """Find the file and search path for `modulename`. Returns: @@ -138,7 +138,7 @@ def file_and_path_for_module(modulename: str) -> Tuple[Optional[str], List[str]] return filename, path -def add_stdlib_paths(paths: Set[str]) -> None: +def add_stdlib_paths(paths: set[str]) -> None: """Add paths where the stdlib can be found to the set `paths`.""" # Look at where some standard modules are located. That's the # indication for "installed with the interpreter". 
In some @@ -150,7 +150,7 @@ def add_stdlib_paths(paths: Set[str]) -> None: paths.add(canonical_path(m, directory=True)) -def add_third_party_paths(paths: Set[str]) -> None: +def add_third_party_paths(paths: set[str]) -> None: """Add locations for third-party packages to the set `paths`.""" # Get the paths that sysconfig knows about. scheme_names = set(sysconfig.get_scheme_names()) @@ -164,7 +164,7 @@ def add_third_party_paths(paths: Set[str]) -> None: paths.add(config_paths[path_name]) -def add_coverage_paths(paths: Set[str]) -> None: +def add_coverage_paths(paths: set[str]) -> None: """Add paths where coverage.py code can be found to the set `paths`.""" cover_path = canonical_path(__file__, directory=True) paths.add(cover_path) @@ -180,15 +180,15 @@ def __init__( self, config: CoverageConfig, warn: TWarnFn, - debug: Optional[TDebugCtl], + debug: TDebugCtl | None, include_namespace_packages: bool, ) -> None: self.warn = warn self.debug = debug self.include_namespace_packages = include_namespace_packages - self.source: List[str] = [] - self.source_pkgs: List[str] = [] + self.source: list[str] = [] + self.source_pkgs: list[str] = [] self.source_pkgs.extend(config.source_pkgs) for src in config.source or []: if os.path.isdir(src): @@ -201,17 +201,17 @@ def __init__( self.omit = prep_patterns(config.run_omit) # The directories for files considered "installed with the interpreter". - self.pylib_paths: Set[str] = set() + self.pylib_paths: set[str] = set() if not config.cover_pylib: add_stdlib_paths(self.pylib_paths) # To avoid tracing the coverage.py code itself, we skip anything # located where we are. - self.cover_paths: Set[str] = set() + self.cover_paths: set[str] = set() add_coverage_paths(self.cover_paths) # Find where third-party packages are installed. 
- self.third_paths: Set[str] = set() + self.third_paths: set[str] = set() add_third_party_paths(self.third_paths) def _debug(msg: str) -> None: @@ -270,14 +270,14 @@ def _debug(msg: str) -> None: if modfile: if self.third_match.match(modfile): _debug( - f"Source in third-party: source_pkg {pkg!r} at {modfile!r}" + f"Source in third-party: source_pkg {pkg!r} at {modfile!r}", ) self.source_in_third_paths.add(canonical_path(source_for_file(modfile))) else: for pathdir in path: if self.third_match.match(pathdir): _debug( - f"Source in third-party: {pkg!r} path directory at {pathdir!r}" + f"Source in third-party: {pkg!r} path directory at {pathdir!r}", ) self.source_in_third_paths.add(pathdir) @@ -289,9 +289,9 @@ def _debug(msg: str) -> None: _debug(f"Source in third-party matching: {self.source_in_third_match}") self.plugins: Plugins - self.disp_class: Type[TFileDisposition] = FileDisposition + self.disp_class: type[TFileDisposition] = FileDisposition - def should_trace(self, filename: str, frame: Optional[FrameType] = None) -> TFileDisposition: + def should_trace(self, filename: str, frame: FrameType | None = None) -> TFileDisposition: """Decide whether to trace execution in `filename`, with a reason. This function is called from the trace function. 
As each new file name @@ -363,7 +363,7 @@ def nope(disp: TFileDisposition, reason: str) -> TFileDisposition: disp.has_dynamic_filename = True else: disp.source_filename = canonical_filename( - file_tracer.source_filename() + file_tracer.source_filename(), ) break except Exception: @@ -380,7 +380,7 @@ def nope(disp: TFileDisposition, reason: str) -> TFileDisposition: if not disp.has_dynamic_filename: if not disp.source_filename: raise PluginError( - f"Plugin {plugin!r} didn't set source_filename for '{disp.original_filename}'" + f"Plugin {plugin!r} didn't set source_filename for '{disp.original_filename}'", ) reason = self.check_include_omit_etc(disp.source_filename, frame) if reason: @@ -388,7 +388,7 @@ def nope(disp: TFileDisposition, reason: str) -> TFileDisposition: return disp - def check_include_omit_etc(self, filename: str, frame: Optional[FrameType]) -> Optional[str]: + def check_include_omit_etc(self, filename: str, frame: FrameType | None) -> str | None: """Check a file name against the include, omit, etc, rules. Returns a string or None. String means, don't trace, and is the reason @@ -483,8 +483,8 @@ def warn_already_imported_files(self) -> None: elif self.debug and self.debug.should("trace"): self.debug.write( "Didn't trace already imported file {!r}: {}".format( - disp.original_filename, disp.reason - ) + disp.original_filename, disp.reason, + ), ) def warn_unimported_source(self) -> None: @@ -518,7 +518,7 @@ def _warn_about_unmeasured_code(self, pkg: str) -> None: msg = f"Module {pkg} was previously imported, but not measured" self.warn(msg, slug="module-not-measured") - def find_possibly_unexecuted_files(self) -> Iterable[Tuple[str, Optional[str]]]: + def find_possibly_unexecuted_files(self) -> Iterable[tuple[str, str | None]]: """Find files in the areas of interest that might be untraced. Yields pairs: file path, and responsible plug-in name. 
@@ -533,13 +533,13 @@ def find_possibly_unexecuted_files(self) -> Iterable[Tuple[str, Optional[str]]]: for src in self.source: yield from self._find_executable_files(src) - def _find_plugin_files(self, src_dir: str) -> Iterable[Tuple[str, str]]: + def _find_plugin_files(self, src_dir: str) -> Iterable[tuple[str, str]]: """Get executable files from the plugins.""" for plugin in self.plugins.file_tracers: for x_file in plugin.find_executable_files(src_dir): yield x_file, plugin._coverage_plugin_name - def _find_executable_files(self, src_dir: str) -> Iterable[Tuple[str, Optional[str]]]: + def _find_executable_files(self, src_dir: str) -> Iterable[tuple[str, str | None]]: """Find executable files in `src_dir`. Search for files in `src_dir` that can be executed because they @@ -563,7 +563,7 @@ def _find_executable_files(self, src_dir: str) -> Iterable[Tuple[str, Optional[s continue yield file_path, plugin_name - def sys_info(self) -> Iterable[Tuple[str, Any]]: + def sys_info(self) -> Iterable[tuple[str, Any]]: """Our information for Coverage.sys_info. Returns a list of (key, value) pairs. diff --git a/coverage/jsonreport.py b/coverage/jsonreport.py index 0820c816e..4d4dec7d8 100644 --- a/coverage/jsonreport.py +++ b/coverage/jsonreport.py @@ -9,7 +9,7 @@ import json import sys -from typing import Any, Dict, IO, Iterable, List, Optional, Tuple, TYPE_CHECKING +from typing import Any, IO, Iterable, TYPE_CHECKING from coverage import __version__ from coverage.report_core import get_analysis_to_report @@ -34,9 +34,9 @@ def __init__(self, coverage: Coverage) -> None: self.coverage = coverage self.config = self.coverage.config self.total = Numbers(self.config.precision) - self.report_data: Dict[str, Any] = {} + self.report_data: dict[str, Any] = {} - def report(self, morfs: Optional[Iterable[TMorf]], outfile: IO[str]) -> float: + def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str]) -> float: """Generate a json report for `morfs`. 
`morfs` is a list of modules or file names. @@ -59,7 +59,7 @@ def report(self, morfs: Optional[Iterable[TMorf]], outfile: IO[str]) -> float: for file_reporter, analysis in get_analysis_to_report(self.coverage, morfs): measured_files[file_reporter.relative_filename()] = self.report_one_file( coverage_data, - analysis + analysis, ) self.report_data["files"] = measured_files @@ -89,7 +89,7 @@ def report(self, morfs: Optional[Iterable[TMorf]], outfile: IO[str]) -> float: return self.total.n_statements and self.total.pc_covered - def report_one_file(self, coverage_data: CoverageData, analysis: Analysis) -> Dict[str, Any]: + def report_one_file(self, coverage_data: CoverageData, analysis: Analysis) -> dict[str, Any]: """Extract the relevant report data for a single file.""" nums = analysis.numbers self.total += nums @@ -117,17 +117,17 @@ def report_one_file(self, coverage_data: CoverageData, analysis: Analysis) -> Di "missing_branches": nums.n_missing_branches, }) reported_file["executed_branches"] = list( - _convert_branch_arcs(analysis.executed_branch_arcs()) + _convert_branch_arcs(analysis.executed_branch_arcs()), ) reported_file["missing_branches"] = list( - _convert_branch_arcs(analysis.missing_branch_arcs()) + _convert_branch_arcs(analysis.missing_branch_arcs()), ) return reported_file def _convert_branch_arcs( - branch_arcs: Dict[TLineNo, List[TLineNo]], -) -> Iterable[Tuple[TLineNo, TLineNo]]: + branch_arcs: dict[TLineNo, list[TLineNo]], +) -> Iterable[tuple[TLineNo, TLineNo]]: """Convert branch arcs to a list of two-element tuples.""" for source, targets in branch_arcs.items(): for target in targets: diff --git a/coverage/lcovreport.py b/coverage/lcovreport.py index f0e084767..e54bae8c8 100644 --- a/coverage/lcovreport.py +++ b/coverage/lcovreport.py @@ -9,7 +9,7 @@ import hashlib import sys -from typing import IO, Iterable, Optional, TYPE_CHECKING +from typing import IO, Iterable, TYPE_CHECKING from coverage.plugin import FileReporter from coverage.report_core 
import get_analysis_to_report @@ -35,7 +35,7 @@ def __init__(self, coverage: Coverage) -> None: self.coverage = coverage self.total = Numbers(self.coverage.config.precision) - def report(self, morfs: Optional[Iterable[TMorf]], outfile: IO[str]) -> float: + def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str]) -> float: """Renders the full lcov report. `morfs` is a list of modules or filenames @@ -97,10 +97,10 @@ def get_lcov(self, fr: FileReporter, analysis: Analysis, outfile: IO[str]) -> No missing_arcs = analysis.missing_branch_arcs() executed_arcs = analysis.executed_branch_arcs() for block_number, block_line_number in enumerate( - sorted(analysis.branch_stats().keys()) + sorted(analysis.branch_stats().keys()), ): for branch_number, line_number in enumerate( - sorted(missing_arcs[block_line_number]) + sorted(missing_arcs[block_line_number]), ): # The exit branches have a negative line number, # this will not produce valid lcov. Setting diff --git a/coverage/misc.py b/coverage/misc.py index 0280650d7..2b27efc99 100644 --- a/coverage/misc.py +++ b/coverage/misc.py @@ -21,8 +21,7 @@ from types import ModuleType from typing import ( - Any, Callable, Dict, IO, Iterable, Iterator, List, Mapping, NoReturn, Optional, - Sequence, Tuple, TypeVar, Union, + Any, Callable, IO, Iterable, Iterator, Mapping, NoReturn, Sequence, TypeVar, ) from coverage import env @@ -34,7 +33,7 @@ # pylint: disable=unused-wildcard-import from coverage.exceptions import * # pylint: disable=wildcard-import -ISOLATED_MODULES: Dict[ModuleType, ModuleType] = {} +ISOLATED_MODULES: dict[ModuleType, ModuleType] = {} def isolate_module(mod: ModuleType) -> ModuleType: @@ -80,7 +79,7 @@ def sys_modules_saved() -> Iterator[None]: saver.restore() -def import_third_party(modname: str) -> Tuple[ModuleType, bool]: +def import_third_party(modname: str) -> tuple[ModuleType, bool]: """Import a third-party module we need, but might not be installed. 
This also cleans out the module after the import, so that coverage won't @@ -140,7 +139,7 @@ def _wrapper(self: TSelf) -> TRetVal: return fn # pragma: not testing -def bool_or_none(b: Any) -> Optional[bool]: +def bool_or_none(b: Any) -> bool | None: """Return bool(b), but preserve None.""" if b is None: return None @@ -180,7 +179,7 @@ def ensure_dir_for_file(path: str) -> None: ensure_dir(os.path.dirname(path)) -def output_encoding(outfile: Optional[IO[str]] = None) -> str: +def output_encoding(outfile: IO[str] | None = None) -> str: """Determine the encoding to use for output written to `outfile` or stdout.""" if outfile is None: outfile = sys.stdout @@ -243,7 +242,7 @@ def _needs_to_implement(that: Any, func_name: str) -> NoReturn: name = f"{klass.__module__}.{klass.__name__}" raise NotImplementedError( - f"{thing} {name!r} needs to implement {func_name}()" + f"{thing} {name!r} needs to implement {func_name}()", ) @@ -318,7 +317,7 @@ def format_local_datetime(dt: datetime.datetime) -> str: return dt.astimezone().strftime("%Y-%m-%d %H:%M %z") -def import_local_file(modname: str, modfile: Optional[str] = None) -> ModuleType: +def import_local_file(modname: str, modfile: str | None = None) -> ModuleType: """Import a local file as a module. Opens a file in the current directory named `modname`.py, imports it @@ -338,7 +337,7 @@ def import_local_file(modname: str, modfile: Optional[str] = None) -> ModuleType return mod -def _human_key(s: str) -> Tuple[List[Union[str, int]], str]: +def _human_key(s: str) -> tuple[list[str | int], str]: """Turn a string into a list of string and number chunks. "z23a" -> (["z", 23, "a"], "z23a") @@ -346,7 +345,7 @@ def _human_key(s: str) -> Tuple[List[Union[str, int]], str]: The original string is appended as a last value to ensure the key is unique enough so that "x1y" and "x001y" can be distinguished. 
""" - def tryint(s: str) -> Union[str, int]: + def tryint(s: str) -> str | int: """If `s` is a number, return an int, else `s` unchanged.""" try: return int(s) @@ -355,7 +354,7 @@ def tryint(s: str) -> Union[str, int]: return ([tryint(c) for c in re.split(r"(\d+)", s)], s) -def human_sorted(strings: Iterable[str]) -> List[str]: +def human_sorted(strings: Iterable[str]) -> list[str]: """Sort the given iterable of strings the way that humans expect. Numeric components in the strings are sorted as numbers. @@ -370,7 +369,7 @@ def human_sorted(strings: Iterable[str]) -> List[str]: def human_sorted_items( items: Iterable[SortableItem], reverse: bool = False, -) -> List[SortableItem]: +) -> list[SortableItem]: """Sort (string, ...) items the way humans expect. The elements of `items` can be any tuple/list. They'll be sorted by the diff --git a/coverage/multiproc.py b/coverage/multiproc.py index ab2bc4a17..6d5a82737 100644 --- a/coverage/multiproc.py +++ b/coverage/multiproc.py @@ -12,7 +12,7 @@ import sys import traceback -from typing import Any, Dict, Optional +from typing import Any from coverage.debug import DebugControl @@ -29,7 +29,7 @@ class ProcessWithCoverage(OriginalProcess): # pylint: disable=abstract-m def _bootstrap(self, *args, **kwargs): # type: ignore[no-untyped-def] """Wrapper around _bootstrap to start coverage.""" - debug: Optional[DebugControl] = None + debug: DebugControl | None = None try: from coverage import Coverage # avoid circular import cov = Coverage(data_suffix=True, auto_data=True) @@ -66,10 +66,10 @@ class Stowaway: def __init__(self, rcfile: str) -> None: self.rcfile = rcfile - def __getstate__(self) -> Dict[str, str]: + def __getstate__(self) -> dict[str, str]: return {"rcfile": self.rcfile} - def __setstate__(self, state: Dict[str, str]) -> None: + def __setstate__(self, state: dict[str, str]) -> None: patch_multiprocessing(state["rcfile"]) @@ -104,7 +104,7 @@ def patch_multiprocessing(rcfile: str) -> None: except (ImportError, 
AttributeError): pass else: - def get_preparation_data_with_stowaway(name: str) -> Dict[str, Any]: + def get_preparation_data_with_stowaway(name: str) -> dict[str, Any]: """Get the original preparation data, and also insert our stowaway.""" d = original_get_preparation_data(name) d["stowaway"] = Stowaway(rcfile) diff --git a/coverage/numbits.py b/coverage/numbits.py index 71b974de5..a4eedfa86 100644 --- a/coverage/numbits.py +++ b/coverage/numbits.py @@ -20,7 +20,7 @@ import sqlite3 from itertools import zip_longest -from typing import Iterable, List +from typing import Iterable def nums_to_numbits(nums: Iterable[int]) -> bytes: @@ -43,7 +43,7 @@ def nums_to_numbits(nums: Iterable[int]) -> bytes: return bytes(b) -def numbits_to_nums(numbits: bytes) -> List[int]: +def numbits_to_nums(numbits: bytes) -> list[int]: """Convert a numbits into a list of numbers. Arguments: diff --git a/coverage/parser.py b/coverage/parser.py index 9349c9ea8..959174c36 100644 --- a/coverage/parser.py +++ b/coverage/parser.py @@ -13,6 +13,7 @@ import token import tokenize +from dataclasses import dataclass from types import CodeType from typing import ( cast, Any, Callable, Dict, Iterable, List, Optional, Protocol, Sequence, @@ -37,9 +38,9 @@ class PythonParser: """ def __init__( self, - text: Optional[str] = None, - filename: Optional[str] = None, - exclude: Optional[str] = None, + text: str | None = None, + filename: str | None = None, + exclude: str | None = None, ) -> None: """ Source can be provided as `text`, the text itself, or `filename`, from @@ -61,45 +62,45 @@ def __init__( self.exclude = exclude # The text lines of the parsed code. - self.lines: List[str] = self.text.split("\n") + self.lines: list[str] = self.text.split("\n") # The normalized line numbers of the statements in the code. Exclusions # are taken into account, and statements are adjusted to their first # lines. 
- self.statements: Set[TLineNo] = set() + self.statements: set[TLineNo] = set() # The normalized line numbers of the excluded lines in the code, # adjusted to their first lines. - self.excluded: Set[TLineNo] = set() + self.excluded: set[TLineNo] = set() # The raw_* attributes are only used in this class, and in # lab/parser.py to show how this class is working. # The line numbers that start statements, as reported by the line # number table in the bytecode. - self.raw_statements: Set[TLineNo] = set() + self.raw_statements: set[TLineNo] = set() # The raw line numbers of excluded lines of code, as marked by pragmas. - self.raw_excluded: Set[TLineNo] = set() + self.raw_excluded: set[TLineNo] = set() # The line numbers of class definitions. - self.raw_classdefs: Set[TLineNo] = set() + self.raw_classdefs: set[TLineNo] = set() # The line numbers of docstring lines. - self.raw_docstrings: Set[TLineNo] = set() + self.raw_docstrings: set[TLineNo] = set() # Internal detail, used by lab/parser.py. self.show_tokens = False # A dict mapping line numbers to lexical statement starts for # multi-line statements. - self._multiline: Dict[TLineNo, TLineNo] = {} + self._multiline: dict[TLineNo, TLineNo] = {} # Lazily-created arc data, and missing arc descriptions. - self._all_arcs: Optional[Set[TArc]] = None - self._missing_arc_fragments: Optional[TArcFragments] = None + self._all_arcs: set[TArc] | None = None + self._missing_arc_fragments: TArcFragments | None = None - def lines_matching(self, *regexes: str) -> Set[TLineNo]: + def lines_matching(self, *regexes: str) -> set[TLineNo]: """Find the lines matching one of a list of regexes. 
Returns a set of line numbers, the lines that contain a match for one @@ -153,7 +154,7 @@ def _raw_parse(self) -> None: if self.show_tokens: # pragma: debugging print("%10s %5s %-20r %r" % ( tokenize.tok_name.get(toktype, toktype), - nice_pair((slineno, elineno)), ttext, ltext + nice_pair((slineno, elineno)), ttext, ltext, )) if toktype == token.INDENT: indent += 1 @@ -239,7 +240,7 @@ def first_line(self, lineno: TLineNo) -> TLineNo: lineno = self._multiline.get(lineno, lineno) return lineno - def first_lines(self, linenos: Iterable[TLineNo]) -> Set[TLineNo]: + def first_lines(self, linenos: Iterable[TLineNo]) -> set[TLineNo]: """Map the line numbers in `linenos` to the correct first line of the statement. @@ -248,11 +249,11 @@ def first_lines(self, linenos: Iterable[TLineNo]) -> Set[TLineNo]: """ return {self.first_line(l) for l in linenos} - def translate_lines(self, lines: Iterable[TLineNo]) -> Set[TLineNo]: + def translate_lines(self, lines: Iterable[TLineNo]) -> set[TLineNo]: """Implement `FileReporter.translate_lines`.""" return self.first_lines(lines) - def translate_arcs(self, arcs: Iterable[TArc]) -> Set[TArc]: + def translate_arcs(self, arcs: Iterable[TArc]) -> set[TArc]: """Implement `FileReporter.translate_arcs`.""" return {(self.first_line(a), self.first_line(b)) for (a, b) in arcs} @@ -272,7 +273,7 @@ def parse_source(self) -> None: lineno = err.args[1][0] # TokenError raise NotPython( f"Couldn't parse '{self.filename}' as Python source: " + - f"{err.args[0]!r} at line {lineno}" + f"{err.args[0]!r} at line {lineno}", ) from err self.excluded = self.first_lines(self.raw_excluded) @@ -281,7 +282,7 @@ def parse_source(self) -> None: starts = self.raw_statements - ignore self.statements = self.first_lines(starts) - ignore - def arcs(self) -> Set[TArc]: + def arcs(self) -> set[TArc]: """Get information about the arcs available in the code. Returns a set of line number pairs. 
Line numbers have been normalized @@ -311,13 +312,13 @@ def _analyze_ast(self) -> None: self._missing_arc_fragments = aaa.missing_arc_fragments - def exit_counts(self) -> Dict[TLineNo, int]: + def exit_counts(self) -> dict[TLineNo, int]: """Get a count of exits from that each line. Excluded lines are excluded. """ - exit_counts: Dict[TLineNo, int] = collections.defaultdict(int) + exit_counts: dict[TLineNo, int] = collections.defaultdict(int) for l1, l2 in self.arcs(): if l1 < 0: # Don't ever report -1 as a line number @@ -342,7 +343,7 @@ def missing_arc_description( self, start: TLineNo, end: TLineNo, - executed_arcs: Optional[Iterable[TArc]] = None, + executed_arcs: Iterable[TArc] | None = None, ) -> str: """Provide an English sentence describing a missing arc.""" if self._missing_arc_fragments is None: @@ -390,8 +391,8 @@ class ByteParser: def __init__( self, text: str, - code: Optional[CodeType] = None, - filename: Optional[str] = None, + code: CodeType | None = None, + filename: str | None = None, ) -> None: self.text = text if code is not None: @@ -403,8 +404,8 @@ def __init__( except SyntaxError as synerr: raise NotPython( "Couldn't parse '%s' as Python source: '%s' at line %d" % ( - filename, synerr.msg, synerr.lineno or 0 - ) + filename, synerr.msg, synerr.lineno or 0, + ), ) from synerr def child_parsers(self) -> Iterable[ByteParser]: @@ -462,7 +463,8 @@ def _find_statements(self) -> Iterable[TLineNo]: # AST analysis # -class ArcStart(collections.namedtuple("Arc", "lineno, cause")): +@dataclass(frozen=True, order=True) +class ArcStart: """The information needed to start an arc. `lineno` is the line number the arc starts from. @@ -474,8 +476,8 @@ class ArcStart(collections.namedtuple("Arc", "lineno, cause")): to have `lineno` interpolated into it. 
""" - def __new__(cls, lineno: TLineNo, cause: Optional[str] = None) -> ArcStart: - return super().__new__(cls, lineno, cause) + lineno: TLineNo + cause: str = "" class TAddArcFn(Protocol): @@ -484,8 +486,8 @@ def __call__( self, start: TLineNo, end: TLineNo, - smsg: Optional[str] = None, - emsg: Optional[str] = None, + smsg: str | None = None, + emsg: str | None = None, ) -> None: ... @@ -501,23 +503,23 @@ class Block: stack. """ # pylint: disable=unused-argument - def process_break_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: + def process_break_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: """Process break exits.""" # Because break can only appear in loops, and most subclasses # implement process_break_exits, this function is never reached. raise AssertionError - def process_continue_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: + def process_continue_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: """Process continue exits.""" # Because continue can only appear in loops, and most subclasses # implement process_continue_exits, this function is never reached. raise AssertionError - def process_raise_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: + def process_raise_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: """Process raise exits.""" return False - def process_return_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: + def process_return_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: """Process return exits.""" return False @@ -528,13 +530,13 @@ def __init__(self, start: TLineNo) -> None: # The line number where the loop starts. self.start = start # A set of ArcStarts, the arcs from break statements exiting this loop. 
- self.break_exits: Set[ArcStart] = set() + self.break_exits: set[ArcStart] = set() - def process_break_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: + def process_break_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: self.break_exits.update(exits) return True - def process_continue_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: + def process_continue_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: for xit in exits: add_arc(xit.lineno, self.start, xit.cause) return True @@ -548,7 +550,7 @@ def __init__(self, start: TLineNo, name: str) -> None: # The name of the function. self.name = name - def process_raise_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: + def process_raise_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: for xit in exits: add_arc( xit.lineno, -self.start, xit.cause, @@ -556,7 +558,7 @@ def process_raise_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: ) return True - def process_return_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: + def process_return_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: for xit in exits: add_arc( xit.lineno, -self.start, xit.cause, @@ -567,7 +569,7 @@ def process_return_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool class TryBlock(Block): """A block on the block stack representing a `try` block.""" - def __init__(self, handler_start: Optional[TLineNo], final_start: Optional[TLineNo]) -> None: + def __init__(self, handler_start: TLineNo | None, final_start: TLineNo | None) -> None: # The line number of the first "except" handler, if any. self.handler_start = handler_start # The line number of the "finally:" clause, if any. @@ -575,24 +577,24 @@ def __init__(self, handler_start: Optional[TLineNo], final_start: Optional[TLine # The ArcStarts for breaks/continues/returns/raises inside the "try:" # that need to route through the "finally:" clause. 
- self.break_from: Set[ArcStart] = set() - self.continue_from: Set[ArcStart] = set() - self.raise_from: Set[ArcStart] = set() - self.return_from: Set[ArcStart] = set() + self.break_from: set[ArcStart] = set() + self.continue_from: set[ArcStart] = set() + self.raise_from: set[ArcStart] = set() + self.return_from: set[ArcStart] = set() - def process_break_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: + def process_break_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: if self.final_start is not None: self.break_from.update(exits) return True return False - def process_continue_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: + def process_continue_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: if self.final_start is not None: self.continue_from.update(exits) return True return False - def process_raise_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: + def process_raise_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: if self.handler_start is not None: for xit in exits: add_arc(xit.lineno, self.handler_start, xit.cause) @@ -601,7 +603,7 @@ def process_raise_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: self.raise_from.update(exits) return True - def process_return_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: + def process_return_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: if self.final_start is not None: self.return_from.update(exits) return True @@ -620,15 +622,15 @@ def __init__(self, start: TLineNo) -> None: # The ArcStarts for breaks/continues/returns/raises inside the "with:" # that need to go through the with-statement while exiting. 
- self.break_from: Set[ArcStart] = set() - self.continue_from: Set[ArcStart] = set() - self.return_from: Set[ArcStart] = set() + self.break_from: set[ArcStart] = set() + self.continue_from: set[ArcStart] = set() + self.return_from: set[ArcStart] = set() def _process_exits( self, - exits: Set[ArcStart], + exits: set[ArcStart], add_arc: TAddArcFn, - from_set: Optional[Set[ArcStart]] = None, + from_set: set[ArcStart] | None = None, ) -> bool: """Helper to process the four kinds of exits.""" for xit in exits: @@ -637,16 +639,16 @@ def _process_exits( from_set.update(exits) return True - def process_break_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: + def process_break_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: return self._process_exits(exits, add_arc, self.break_from) - def process_continue_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: + def process_continue_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: return self._process_exits(exits, add_arc, self.continue_from) - def process_raise_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: + def process_raise_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: return self._process_exits(exits, add_arc) - def process_return_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: + def process_return_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: return self._process_exits(exits, add_arc, self.return_from) @@ -680,8 +682,8 @@ class AstArcAnalyzer: def __init__( self, text: str, - statements: Set[TLineNo], - multiline: Dict[TLineNo, TLineNo], + statements: set[TLineNo], + multiline: dict[TLineNo, TLineNo], ) -> None: self.root_node = ast.parse(text) # TODO: I think this is happening in too many places. @@ -696,12 +698,12 @@ def __init__( # Dump the AST so that failing tests have helpful output. 
print(f"Statements: {self.statements}") print(f"Multiline map: {self.multiline}") - dumpkw: Dict[str, Any] = {} + dumpkw: dict[str, Any] = {} if sys.version_info >= (3, 9): dumpkw["indent"] = 4 print(ast.dump(self.root_node, include_attributes=True, **dumpkw)) - self.arcs: Set[TArc] = set() + self.arcs: set[TArc] = set() # A map from arc pairs to a list of pairs of sentence fragments: # { (start, end): [(startmsg, endmsg), ...], } @@ -709,7 +711,7 @@ def __init__( # For an arc from line 17, they should be usable like: # "Line 17 {endmsg}, because {startmsg}" self.missing_arc_fragments: TArcFragments = collections.defaultdict(list) - self.block_stack: List[Block] = [] + self.block_stack: list[Block] = [] # $set_env.py: COVERAGE_TRACK_ARCS - Trace possible arcs added while parsing code. self.debug = bool(int(os.getenv("COVERAGE_TRACK_ARCS", "0"))) @@ -731,8 +733,8 @@ def add_arc( self, start: TLineNo, end: TLineNo, - smsg: Optional[str] = None, - emsg: Optional[str] = None, + smsg: str | None = None, + emsg: str | None = None, ) -> None: """Add an arc, including message fragments to use if it is missing.""" if self.debug: # pragma: debugging @@ -756,7 +758,7 @@ def line_for_node(self, node: ast.AST) -> TLineNo: node_name = node.__class__.__name__ handler = cast( Optional[Callable[[ast.AST], TLineNo]], - getattr(self, "_line__" + node_name, None) + getattr(self, "_line__" + node_name, None), ) if handler is not None: return handler(node) @@ -811,7 +813,7 @@ def _line__Module(self, node: ast.Module) -> TLineNo: "Import", "ImportFrom", "Nonlocal", "Pass", } - def add_arcs(self, node: ast.AST) -> Set[ArcStart]: + def add_arcs(self, node: ast.AST) -> set[ArcStart]: """Add the arcs for `node`. Return a set of ArcStarts, exits from this node to the next. 
Because a @@ -830,7 +832,7 @@ def add_arcs(self, node: ast.AST) -> Set[ArcStart]: node_name = node.__class__.__name__ handler = cast( Optional[Callable[[ast.AST], Set[ArcStart]]], - getattr(self, "_handle__" + node_name, None) + getattr(self, "_handle__" + node_name, None), ) if handler is not None: return handler(node) @@ -847,9 +849,9 @@ def add_arcs(self, node: ast.AST) -> Set[ArcStart]: def add_body_arcs( self, body: Sequence[ast.AST], - from_start: Optional[ArcStart] = None, - prev_starts: Optional[Set[ArcStart]] = None - ) -> Set[ArcStart]: + from_start: ArcStart | None = None, + prev_starts: set[ArcStart] | None = None, + ) -> set[ArcStart]: """Add arcs for the body of a compound statement. `body` is the body node. `from_start` is a single `ArcStart` that can @@ -877,7 +879,7 @@ def add_body_arcs( prev_starts = self.add_arcs(body_node) return prev_starts - def find_non_missing_node(self, node: ast.AST) -> Optional[ast.AST]: + def find_non_missing_node(self, node: ast.AST) -> ast.AST | None: """Search `node` looking for a child that has not been optimized away. This might return the node you started with, or it will work recursively @@ -896,7 +898,7 @@ def find_non_missing_node(self, node: ast.AST) -> Optional[ast.AST]: missing_fn = cast( Optional[Callable[[ast.AST], Optional[ast.AST]]], - getattr(self, "_missing__" + node.__class__.__name__, None) + getattr(self, "_missing__" + node.__class__.__name__, None), ) if missing_fn is not None: ret_node = missing_fn(node) @@ -911,7 +913,7 @@ def find_non_missing_node(self, node: ast.AST) -> Optional[ast.AST]: # find_non_missing_node) to find a node to use instead of the missing # node. They can return None if the node should truly be gone. - def _missing__If(self, node: ast.If) -> Optional[ast.AST]: + def _missing__If(self, node: ast.If) -> ast.AST | None: # If the if-node is missing, then one of its children might still be # here, but not both. So return the first of the two that isn't missing. 
# Use a NodeList to hold the clauses as a single node. @@ -922,7 +924,7 @@ def _missing__If(self, node: ast.If) -> Optional[ast.AST]: return self.find_non_missing_node(NodeList(node.orelse)) return None - def _missing__NodeList(self, node: NodeList) -> Optional[ast.AST]: + def _missing__NodeList(self, node: NodeList) -> ast.AST | None: # A NodeList might be a mixture of missing and present nodes. Find the # ones that are present. non_missing_children = [] @@ -938,7 +940,7 @@ def _missing__NodeList(self, node: NodeList) -> Optional[ast.AST]: return non_missing_children[0] return NodeList(non_missing_children) - def _missing__While(self, node: ast.While) -> Optional[ast.AST]: + def _missing__While(self, node: ast.While) -> ast.AST | None: body_nodes = self.find_non_missing_node(NodeList(node.body)) if not body_nodes: return None @@ -953,7 +955,7 @@ def _missing__While(self, node: ast.While) -> Optional[ast.AST]: new_while.orelse = [] return new_while - def is_constant_expr(self, node: ast.AST) -> Optional[str]: + def is_constant_expr(self, node: ast.AST) -> str | None: """Is this a compile-time constant?""" node_name = node.__class__.__name__ if node_name in ["Constant", "NameConstant", "Num"]: @@ -978,25 +980,25 @@ def is_constant_expr(self, node: ast.AST) -> Optional[str]: # enclosing loop block, or the nearest enclosing finally block, whichever # is nearer. 
- def process_break_exits(self, exits: Set[ArcStart]) -> None: + def process_break_exits(self, exits: set[ArcStart]) -> None: """Add arcs due to jumps from `exits` being breaks.""" for block in self.nearest_blocks(): # pragma: always breaks if block.process_break_exits(exits, self.add_arc): break - def process_continue_exits(self, exits: Set[ArcStart]) -> None: + def process_continue_exits(self, exits: set[ArcStart]) -> None: """Add arcs due to jumps from `exits` being continues.""" for block in self.nearest_blocks(): # pragma: always breaks if block.process_continue_exits(exits, self.add_arc): break - def process_raise_exits(self, exits: Set[ArcStart]) -> None: + def process_raise_exits(self, exits: set[ArcStart]) -> None: """Add arcs due to jumps from `exits` being raises.""" for block in self.nearest_blocks(): if block.process_raise_exits(exits, self.add_arc): break - def process_return_exits(self, exits: Set[ArcStart]) -> None: + def process_return_exits(self, exits: set[ArcStart]) -> None: """Add arcs due to jumps from `exits` being returns.""" for block in self.nearest_blocks(): # pragma: always breaks if block.process_return_exits(exits, self.add_arc): @@ -1013,16 +1015,16 @@ def process_return_exits(self, exits: Set[ArcStart]) -> None: # Every node type that represents a statement should have a handler, or it # should be listed in OK_TO_DEFAULT. 
- def _handle__Break(self, node: ast.Break) -> Set[ArcStart]: + def _handle__Break(self, node: ast.Break) -> set[ArcStart]: here = self.line_for_node(node) break_start = ArcStart(here, cause="the break on line {lineno} wasn't executed") self.process_break_exits({break_start}) return set() - def _handle_decorated(self, node: ast.FunctionDef) -> Set[ArcStart]: + def _handle_decorated(self, node: ast.FunctionDef) -> set[ArcStart]: """Add arcs for things that can be decorated (classes and functions).""" main_line: TLineNo = node.lineno - last: Optional[TLineNo] = node.lineno + last: TLineNo | None = node.lineno decs = node.decorator_list if decs: last = None @@ -1052,13 +1054,13 @@ def _handle_decorated(self, node: ast.FunctionDef) -> Set[ArcStart]: _handle__ClassDef = _handle_decorated - def _handle__Continue(self, node: ast.Continue) -> Set[ArcStart]: + def _handle__Continue(self, node: ast.Continue) -> set[ArcStart]: here = self.line_for_node(node) continue_start = ArcStart(here, cause="the continue on line {lineno} wasn't executed") self.process_continue_exits({continue_start}) return set() - def _handle__For(self, node: ast.For) -> Set[ArcStart]: + def _handle__For(self, node: ast.For) -> set[ArcStart]: start = self.line_for_node(node.iter) self.block_stack.append(LoopBlock(start=start)) from_start = ArcStart(start, cause="the loop on line {lineno} never started") @@ -1083,7 +1085,7 @@ def _handle__For(self, node: ast.For) -> Set[ArcStart]: _handle__FunctionDef = _handle_decorated _handle__AsyncFunctionDef = _handle_decorated - def _handle__If(self, node: ast.If) -> Set[ArcStart]: + def _handle__If(self, node: ast.If) -> set[ArcStart]: start = self.line_for_node(node.test) from_start = ArcStart(start, cause="the condition on line {lineno} was never true") exits = self.add_body_arcs(node.body, from_start=from_start) @@ -1092,7 +1094,7 @@ def _handle__If(self, node: ast.If) -> Set[ArcStart]: return exits if sys.version_info >= (3, 10): - def _handle__Match(self, 
node: ast.Match) -> Set[ArcStart]: + def _handle__Match(self, node: ast.Match) -> set[ArcStart]: start = self.line_for_node(node) last_start = start exits = set() @@ -1112,29 +1114,31 @@ def _handle__Match(self, node: ast.Match) -> Set[ArcStart]: exits |= self.add_body_arcs(case.body, from_start=from_start) last_start = case_start if not had_wildcard: - exits.add(from_start) + exits.add( + ArcStart(case_start, cause="the pattern on line {lineno} always matched"), + ) return exits - def _handle__NodeList(self, node: NodeList) -> Set[ArcStart]: + def _handle__NodeList(self, node: NodeList) -> set[ArcStart]: start = self.line_for_node(node) exits = self.add_body_arcs(node.body, from_start=ArcStart(start)) return exits - def _handle__Raise(self, node: ast.Raise) -> Set[ArcStart]: + def _handle__Raise(self, node: ast.Raise) -> set[ArcStart]: here = self.line_for_node(node) raise_start = ArcStart(here, cause="the raise on line {lineno} wasn't executed") self.process_raise_exits({raise_start}) # `raise` statement jumps away, no exits from here. return set() - def _handle__Return(self, node: ast.Return) -> Set[ArcStart]: + def _handle__Return(self, node: ast.Return) -> set[ArcStart]: here = self.line_for_node(node) return_start = ArcStart(here, cause="the return on line {lineno} wasn't executed") self.process_return_exits({return_start}) # `return` statement jumps away, no exits from here. 
return set() - def _handle__Try(self, node: ast.Try) -> Set[ArcStart]: + def _handle__Try(self, node: ast.Try) -> set[ArcStart]: if node.handlers: handler_start = self.line_for_node(node.handlers[0]) else: @@ -1167,10 +1171,10 @@ def _handle__Try(self, node: ast.Try) -> Set[ArcStart]: else: self.block_stack.pop() - handler_exits: Set[ArcStart] = set() + handler_exits: set[ArcStart] = set() if node.handlers: - last_handler_start: Optional[TLineNo] = None + last_handler_start: TLineNo | None = None for handler_node in node.handlers: handler_start = self.line_for_node(handler_node) if last_handler_start is not None: @@ -1223,7 +1227,7 @@ def _handle__Try(self, node: ast.Try) -> Set[ArcStart]: if try_block.raise_from: self.process_raise_exits( - self._combine_finally_starts(try_block.raise_from, final_exits) + self._combine_finally_starts(try_block.raise_from, final_exits), ) if try_block.return_from: @@ -1245,7 +1249,7 @@ def _handle__Try(self, node: ast.Try) -> Set[ArcStart]: return exits - def _combine_finally_starts(self, starts: Set[ArcStart], exits: Set[ArcStart]) -> Set[ArcStart]: + def _combine_finally_starts(self, starts: set[ArcStart], exits: set[ArcStart]) -> set[ArcStart]: """Helper for building the cause of `finally` branches. 
"finally" clauses might not execute their exits, and the causes could @@ -1254,13 +1258,13 @@ def _combine_finally_starts(self, starts: Set[ArcStart], exits: Set[ArcStart]) - """ causes = [] for start in sorted(starts): - if start.cause is not None: + if start.cause: causes.append(start.cause.format(lineno=start.lineno)) cause = " or ".join(causes) exits = {ArcStart(xit.lineno, cause) for xit in exits} return exits - def _handle__While(self, node: ast.While) -> Set[ArcStart]: + def _handle__While(self, node: ast.While) -> set[ArcStart]: start = to_top = self.line_for_node(node.test) constant_test = self.is_constant_expr(node.test) top_is_body0 = False @@ -1289,7 +1293,7 @@ def _handle__While(self, node: ast.While) -> Set[ArcStart]: exits.add(from_start) return exits - def _handle__With(self, node: ast.With) -> Set[ArcStart]: + def _handle__With(self, node: ast.With) -> set[ArcStart]: start = self.line_for_node(node) if env.PYBEHAVIOR.exit_through_with: self.block_stack.append(WithBlock(start=start)) @@ -1304,15 +1308,15 @@ def _handle__With(self, node: ast.With) -> Set[ArcStart]: exits = with_exit if with_block.break_from: self.process_break_exits( - self._combine_finally_starts(with_block.break_from, with_exit) + self._combine_finally_starts(with_block.break_from, with_exit), ) if with_block.continue_from: self.process_continue_exits( - self._combine_finally_starts(with_block.continue_from, with_exit) + self._combine_finally_starts(with_block.continue_from, with_exit), ) if with_block.return_from: self.process_return_exits( - self._combine_finally_starts(with_block.return_from, with_exit) + self._combine_finally_starts(with_block.return_from, with_exit), ) return exits diff --git a/coverage/phystokens.py b/coverage/phystokens.py index 5fd5dacc5..7d8b30c8a 100644 --- a/coverage/phystokens.py +++ b/coverage/phystokens.py @@ -13,7 +13,7 @@ import token import tokenize -from typing import Iterable, List, Optional, Set, Tuple +from typing import Iterable from coverage 
import env from coverage.types import TLineNo, TSourceTokenLines @@ -32,7 +32,7 @@ def _phys_tokens(toks: TokenInfos) -> TokenInfos: Returns the same values as generate_tokens() """ - last_line: Optional[str] = None + last_line: str | None = None last_lineno = -1 last_ttext: str = "" for ttype, ttext, (slineno, scol), (elineno, ecol), ltext in toks: @@ -68,7 +68,7 @@ def _phys_tokens(toks: TokenInfos) -> TokenInfos: yield tokenize.TokenInfo( 99999, "\\\n", (slineno, ccol), (slineno, ccol+2), - last_line + last_line, ) last_line = ltext if ttype not in (tokenize.NEWLINE, tokenize.NL): @@ -81,7 +81,7 @@ class SoftKeywordFinder(ast.NodeVisitor): """Helper for finding lines with soft keywords, like match/case lines.""" def __init__(self, source: str) -> None: # This will be the set of line numbers that start with a soft keyword. - self.soft_key_lines: Set[TLineNo] = set() + self.soft_key_lines: set[TLineNo] = set() self.visit(ast.parse(source)) if sys.version_info >= (3, 10): @@ -116,7 +116,7 @@ def source_token_lines(source: str) -> TSourceTokenLines: """ ws_tokens = {token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL} - line: List[Tuple[str, str]] = [] + line: list[tuple[str, str]] = [] col = 0 source = source.expandtabs(8).replace("\r\n", "\n") @@ -182,8 +182,8 @@ class CachedTokenizer: """ def __init__(self) -> None: - self.last_text: Optional[str] = None - self.last_tokens: List[tokenize.TokenInfo] = [] + self.last_text: str | None = None + self.last_tokens: list[tokenize.TokenInfo] = [] def generate_tokens(self, text: str) -> TokenInfos: """A stand-in for `tokenize.generate_tokens`.""" diff --git a/coverage/plugin.py b/coverage/plugin.py index 67dcfbff1..761406688 100644 --- a/coverage/plugin.py +++ b/coverage/plugin.py @@ -117,7 +117,7 @@ def coverage_init(reg, options): import functools from types import FrameType -from typing import Any, Dict, Iterable, Optional, Set, Tuple, Union +from typing import Any, Iterable from coverage import files from 
coverage.misc import _needs_to_implement @@ -130,7 +130,7 @@ class CoveragePlugin: _coverage_plugin_name: str _coverage_enabled: bool - def file_tracer(self, filename: str) -> Optional[FileTracer]: # pylint: disable=unused-argument + def file_tracer(self, filename: str) -> FileTracer | None: # pylint: disable=unused-argument """Get a :class:`FileTracer` object for a file. Plug-in type: file tracer. @@ -173,7 +173,7 @@ def file_tracer(self, filename: str) -> Optional[FileTracer]: # pylint: disable= def file_reporter( self, filename: str, # pylint: disable=unused-argument - ) -> Union[FileReporter, str]: # str should be Literal["python"] + ) -> FileReporter | str: # str should be Literal["python"] """Get the :class:`FileReporter` class to use for a file. Plug-in type: file tracer. @@ -190,7 +190,7 @@ def file_reporter( def dynamic_context( self, frame: FrameType, # pylint: disable=unused-argument - ) -> Optional[str]: + ) -> str | None: """Get the dynamically computed context label for `frame`. Plug-in type: dynamic context. @@ -238,7 +238,7 @@ def configure(self, config: TConfigurable) -> None: """ pass - def sys_info(self) -> Iterable[Tuple[str, Any]]: + def sys_info(self) -> Iterable[tuple[str, Any]]: """Get a list of information useful for debugging. Plug-in type: any. @@ -311,7 +311,7 @@ def dynamic_source_filename( self, filename: str, # pylint: disable=unused-argument frame: FrameType, # pylint: disable=unused-argument - ) -> Optional[str]: + ) -> str | None: """Get a dynamically computed source file name. Some plug-ins need to compute the source file name dynamically for each @@ -326,7 +326,7 @@ def dynamic_source_filename( """ return None - def line_number_range(self, frame: FrameType) -> Tuple[TLineNo, TLineNo]: + def line_number_range(self, frame: FrameType) -> tuple[TLineNo, TLineNo]: """Get the range of source line numbers for a given a call frame. 
The call frame is examined, and the source line number in the original @@ -369,7 +369,7 @@ def __init__(self, filename: str) -> None: self.filename = filename def __repr__(self) -> str: - return "<{0.__class__.__name__} filename={0.filename!r}>".format(self) + return f"<{self.__class__.__name__} filename={self.filename!r}>" def relative_filename(self) -> str: """Get the relative file name for this file. @@ -395,7 +395,7 @@ def source(self) -> str: with open(self.filename, encoding="utf-8") as f: return f.read() - def lines(self) -> Set[TLineNo]: + def lines(self) -> set[TLineNo]: """Get the executable lines in this file. Your plug-in must determine which lines in the file were possibly @@ -406,7 +406,7 @@ def lines(self) -> Set[TLineNo]: """ _needs_to_implement(self, "lines") - def excluded_lines(self) -> Set[TLineNo]: + def excluded_lines(self) -> set[TLineNo]: """Get the excluded executable lines in this file. Your plug-in can use any method it likes to allow the user to exclude @@ -419,7 +419,7 @@ def excluded_lines(self) -> Set[TLineNo]: """ return set() - def translate_lines(self, lines: Iterable[TLineNo]) -> Set[TLineNo]: + def translate_lines(self, lines: Iterable[TLineNo]) -> set[TLineNo]: """Translate recorded lines into reported lines. Some file formats will want to report lines slightly differently than @@ -439,7 +439,7 @@ def translate_lines(self, lines: Iterable[TLineNo]) -> Set[TLineNo]: """ return set(lines) - def arcs(self) -> Set[TArc]: + def arcs(self) -> set[TArc]: """Get the executable arcs in this file. To support branch coverage, your plug-in needs to be able to indicate @@ -453,7 +453,7 @@ def arcs(self) -> Set[TArc]: """ return set() - def no_branch_lines(self) -> Set[TLineNo]: + def no_branch_lines(self) -> set[TLineNo]: """Get the lines excused from branch coverage in this file. 
Your plug-in can use any method it likes to allow the user to exclude @@ -466,7 +466,7 @@ def no_branch_lines(self) -> Set[TLineNo]: """ return set() - def translate_arcs(self, arcs: Iterable[TArc]) -> Set[TArc]: + def translate_arcs(self, arcs: Iterable[TArc]) -> set[TArc]: """Translate recorded arcs into reported arcs. Similar to :meth:`translate_lines`, but for arcs. `arcs` is a set of @@ -479,7 +479,7 @@ def translate_arcs(self, arcs: Iterable[TArc]) -> Set[TArc]: """ return set(arcs) - def exit_counts(self) -> Dict[TLineNo, int]: + def exit_counts(self) -> dict[TLineNo, int]: """Get a count of exits from that each line. To determine which lines are branches, coverage.py looks for lines that @@ -496,7 +496,7 @@ def missing_arc_description( self, start: TLineNo, end: TLineNo, - executed_arcs: Optional[Iterable[TArc]] = None, # pylint: disable=unused-argument + executed_arcs: Iterable[TArc] | None = None, # pylint: disable=unused-argument ) -> str: """Provide an English sentence describing a missing arc. 
diff --git a/coverage/plugin_support.py b/coverage/plugin_support.py index c99fb5e30..7b843a10b 100644 --- a/coverage/plugin_support.py +++ b/coverage/plugin_support.py @@ -10,7 +10,7 @@ import sys from types import FrameType -from typing import Any, Dict, Iterable, Iterator, List, Optional, Set, Tuple, Union +from typing import Any, Iterable, Iterator from coverage.exceptions import PluginError from coverage.misc import isolate_module @@ -26,21 +26,21 @@ class Plugins: """The currently loaded collection of coverage.py plugins.""" def __init__(self) -> None: - self.order: List[CoveragePlugin] = [] - self.names: Dict[str, CoveragePlugin] = {} - self.file_tracers: List[CoveragePlugin] = [] - self.configurers: List[CoveragePlugin] = [] - self.context_switchers: List[CoveragePlugin] = [] + self.order: list[CoveragePlugin] = [] + self.names: dict[str, CoveragePlugin] = {} + self.file_tracers: list[CoveragePlugin] = [] + self.configurers: list[CoveragePlugin] = [] + self.context_switchers: list[CoveragePlugin] = [] - self.current_module: Optional[str] = None - self.debug: Optional[TDebugCtl] + self.current_module: str | None = None + self.debug: TDebugCtl | None @classmethod def load_plugins( cls, modules: Iterable[str], config: TPluginConfig, - debug: Optional[TDebugCtl] = None, + debug: TDebugCtl | None = None, ) -> Plugins: """Load plugins from `modules`. @@ -58,7 +58,7 @@ def load_plugins( coverage_init = getattr(mod, "coverage_init", None) if not coverage_init: raise PluginError( - f"Plugin module {module!r} didn't define a coverage_init function" + f"Plugin module {module!r} didn't define a coverage_init function", ) options = config.get_plugin_options(module) @@ -105,7 +105,7 @@ def add_noop(self, plugin: CoveragePlugin) -> None: def _add_plugin( self, plugin: CoveragePlugin, - specialized: Optional[List[CoveragePlugin]], + specialized: list[CoveragePlugin] | None, ) -> None: """Add a plugin object. 
@@ -166,7 +166,7 @@ def __init__(self, plugin: CoveragePlugin, debug: LabelledDebug) -> None: self.plugin = plugin self.debug = debug - def file_tracer(self, filename: str) -> Optional[FileTracer]: + def file_tracer(self, filename: str) -> FileTracer | None: tracer = self.plugin.file_tracer(filename) self.debug.write(f"file_tracer({filename!r}) --> {tracer!r}") if tracer: @@ -174,7 +174,7 @@ def file_tracer(self, filename: str) -> Optional[FileTracer]: tracer = DebugFileTracerWrapper(tracer, debug) return tracer - def file_reporter(self, filename: str) -> Union[FileReporter, str]: + def file_reporter(self, filename: str) -> FileReporter | str: reporter = self.plugin.file_reporter(filename) assert isinstance(reporter, FileReporter) self.debug.write(f"file_reporter({filename!r}) --> {reporter!r}") @@ -183,7 +183,7 @@ def file_reporter(self, filename: str) -> Union[FileReporter, str]: reporter = DebugFileReporterWrapper(filename, reporter, debug) return reporter - def dynamic_context(self, frame: FrameType) -> Optional[str]: + def dynamic_context(self, frame: FrameType) -> str | None: context = self.plugin.dynamic_context(frame) self.debug.write(f"dynamic_context({frame!r}) --> {context!r}") return context @@ -197,7 +197,7 @@ def configure(self, config: TConfigurable) -> None: self.debug.write(f"configure({config!r})") self.plugin.configure(config) - def sys_info(self) -> Iterable[Tuple[str, Any]]: + def sys_info(self) -> Iterable[tuple[str, Any]]: return self.plugin.sys_info() @@ -225,14 +225,14 @@ def has_dynamic_source_filename(self) -> bool: self.debug.write(f"has_dynamic_source_filename() --> {has!r}") return has - def dynamic_source_filename(self, filename: str, frame: FrameType) -> Optional[str]: + def dynamic_source_filename(self, filename: str, frame: FrameType) -> str | None: dyn = self.tracer.dynamic_source_filename(filename, frame) self.debug.write("dynamic_source_filename({!r}, {}) --> {!r}".format( filename, self._show_frame(frame), dyn, )) return dyn - 
def line_number_range(self, frame: FrameType) -> Tuple[TLineNo, TLineNo]: + def line_number_range(self, frame: FrameType) -> tuple[TLineNo, TLineNo]: pair = self.tracer.line_number_range(frame) self.debug.write(f"line_number_range({self._show_frame(frame)}) --> {pair!r}") return pair @@ -251,37 +251,37 @@ def relative_filename(self) -> str: self.debug.write(f"relative_filename() --> {ret!r}") return ret - def lines(self) -> Set[TLineNo]: + def lines(self) -> set[TLineNo]: ret = self.reporter.lines() self.debug.write(f"lines() --> {ret!r}") return ret - def excluded_lines(self) -> Set[TLineNo]: + def excluded_lines(self) -> set[TLineNo]: ret = self.reporter.excluded_lines() self.debug.write(f"excluded_lines() --> {ret!r}") return ret - def translate_lines(self, lines: Iterable[TLineNo]) -> Set[TLineNo]: + def translate_lines(self, lines: Iterable[TLineNo]) -> set[TLineNo]: ret = self.reporter.translate_lines(lines) self.debug.write(f"translate_lines({lines!r}) --> {ret!r}") return ret - def translate_arcs(self, arcs: Iterable[TArc]) -> Set[TArc]: + def translate_arcs(self, arcs: Iterable[TArc]) -> set[TArc]: ret = self.reporter.translate_arcs(arcs) self.debug.write(f"translate_arcs({arcs!r}) --> {ret!r}") return ret - def no_branch_lines(self) -> Set[TLineNo]: + def no_branch_lines(self) -> set[TLineNo]: ret = self.reporter.no_branch_lines() self.debug.write(f"no_branch_lines() --> {ret!r}") return ret - def exit_counts(self) -> Dict[TLineNo, int]: + def exit_counts(self) -> dict[TLineNo, int]: ret = self.reporter.exit_counts() self.debug.write(f"exit_counts() --> {ret!r}") return ret - def arcs(self) -> Set[TArc]: + def arcs(self) -> set[TArc]: ret = self.reporter.arcs() self.debug.write(f"arcs() --> {ret!r}") return ret diff --git a/coverage/python.py b/coverage/python.py index 3deb6819f..0a522d6b9 100644 --- a/coverage/python.py +++ b/coverage/python.py @@ -9,7 +9,7 @@ import types import zipimport -from typing import Dict, Iterable, Optional, Set, TYPE_CHECKING 
+from typing import Iterable, TYPE_CHECKING from coverage import env from coverage.exceptions import CoverageException, NoSource @@ -46,7 +46,7 @@ def get_python_source(filename: str) -> str: else: exts = [ext] - source_bytes: Optional[bytes] + source_bytes: bytes | None for ext in exts: try_filename = base + ext if os.path.exists(try_filename): @@ -73,7 +73,7 @@ def get_python_source(filename: str) -> str: return source -def get_zip_bytes(filename: str) -> Optional[bytes]: +def get_zip_bytes(filename: str) -> bytes | None: """Get data from `filename` if it is a zip file path. Returns the bytestring data read from the zip file, or None if no zip file @@ -143,7 +143,7 @@ def source_for_morf(morf: TMorf) -> str: class PythonFileReporter(FileReporter): """Report support for a Python file.""" - def __init__(self, morf: TMorf, coverage: Optional[Coverage] = None) -> None: + def __init__(self, morf: TMorf, coverage: Coverage | None = None) -> None: self.coverage = coverage filename = source_for_morf(morf) @@ -166,8 +166,8 @@ def __init__(self, morf: TMorf, coverage: Optional[Coverage] = None) -> None: name = relative_filename(filename) self.relname = name - self._source: Optional[str] = None - self._parser: Optional[PythonParser] = None + self._source: str | None = None + self._parser: PythonParser | None = None self._excluded = None def __repr__(self) -> str: @@ -188,22 +188,22 @@ def parser(self) -> PythonParser: self._parser.parse_source() return self._parser - def lines(self) -> Set[TLineNo]: + def lines(self) -> set[TLineNo]: """Return the line numbers of statements in the file.""" return self.parser.statements - def excluded_lines(self) -> Set[TLineNo]: + def excluded_lines(self) -> set[TLineNo]: """Return the line numbers of statements in the file.""" return self.parser.excluded - def translate_lines(self, lines: Iterable[TLineNo]) -> Set[TLineNo]: + def translate_lines(self, lines: Iterable[TLineNo]) -> set[TLineNo]: return self.parser.translate_lines(lines) - 
def translate_arcs(self, arcs: Iterable[TArc]) -> Set[TArc]: + def translate_arcs(self, arcs: Iterable[TArc]) -> set[TArc]: return self.parser.translate_arcs(arcs) @expensive - def no_branch_lines(self) -> Set[TLineNo]: + def no_branch_lines(self) -> set[TLineNo]: assert self.coverage is not None no_branch = self.parser.lines_matching( join_regex(self.coverage.config.partial_list), @@ -212,18 +212,18 @@ def no_branch_lines(self) -> Set[TLineNo]: return no_branch @expensive - def arcs(self) -> Set[TArc]: + def arcs(self) -> set[TArc]: return self.parser.arcs() @expensive - def exit_counts(self) -> Dict[TLineNo, int]: + def exit_counts(self) -> dict[TLineNo, int]: return self.parser.exit_counts() def missing_arc_description( self, start: TLineNo, end: TLineNo, - executed_arcs: Optional[Iterable[TArc]] = None, + executed_arcs: Iterable[TArc] | None = None, ) -> str: return self.parser.missing_arc_description(start, end, executed_arcs) diff --git a/coverage/pytracer.py b/coverage/pytracer.py index f527a4040..69d52c948 100644 --- a/coverage/pytracer.py +++ b/coverage/pytracer.py @@ -12,7 +12,7 @@ import threading from types import FrameType, ModuleType -from typing import Any, Callable, Dict, List, Optional, Set, Tuple, cast +from typing import Any, Callable, Set, cast from coverage import env from coverage.types import ( @@ -63,18 +63,18 @@ def __init__(self) -> None: self.data: TTraceData self.trace_arcs = False self.should_trace: Callable[[str, FrameType], TFileDisposition] - self.should_trace_cache: Dict[str, Optional[TFileDisposition]] - self.should_start_context: Optional[Callable[[FrameType], Optional[str]]] = None - self.switch_context: Optional[Callable[[Optional[str]], None]] = None + self.should_trace_cache: dict[str, TFileDisposition | None] + self.should_start_context: Callable[[FrameType], str | None] | None = None + self.switch_context: Callable[[str | None], None] | None = None self.warn: TWarnFn # The threading module to use, if any. 
- self.threading: Optional[ModuleType] = None + self.threading: ModuleType | None = None - self.cur_file_data: Optional[TTraceFileData] = None + self.cur_file_data: TTraceFileData | None = None self.last_line: TLineNo = 0 - self.cur_file_name: Optional[str] = None - self.context: Optional[str] = None + self.cur_file_name: str | None = None + self.context: str | None = None self.started_context = False # The data_stack parallels the Python call stack. Each entry is @@ -85,8 +85,8 @@ def __init__(self) -> None: # this frame. # [2] The last line number executed in this frame. # [3] Boolean: did this frame start a new context? - self.data_stack: List[Tuple[Optional[TTraceFileData], Optional[str], TLineNo, bool]] = [] - self.thread: Optional[threading.Thread] = None + self.data_stack: list[tuple[TTraceFileData | None, str | None, TLineNo, bool]] = [] + self.thread: threading.Thread | None = None self.stopped = False self._activity = False @@ -106,11 +106,7 @@ def __repr__(self) -> str: def log(self, marker: str, *args: Any) -> None: """For hard-core logging of what this tracer is doing.""" with open("/tmp/debug_trace.txt", "a") as f: - f.write("{} {}[{}]".format( - marker, - self.id, - len(self.data_stack), - )) + f.write(f"{marker} {self.id}[{len(self.data_stack)}]") if 0: # if you want thread ids.. 
f.write(".{:x}.{:x}".format( # type: ignore[unreachable] self.thread.ident, @@ -131,8 +127,8 @@ def _trace( frame: FrameType, event: str, arg: Any, # pylint: disable=unused-argument - lineno: Optional[TLineNo] = None, # pylint: disable=unused-argument - ) -> Optional[TTraceFn]: + lineno: TLineNo | None = None, # pylint: disable=unused-argument + ) -> TTraceFn | None: """The trace function passed to sys.settrace.""" if THIS_FILE in frame.f_code.co_filename: @@ -160,7 +156,7 @@ def _trace( "Empty stack!", frame.f_code.co_filename, frame.f_lineno, - frame.f_code.co_name + frame.f_code.co_name, ) return None @@ -190,7 +186,7 @@ def _trace( self.cur_file_name, self.last_line, started_context, - ) + ), ) # Improve tracing performance: when calling a function, both caller @@ -345,6 +341,6 @@ def reset_activity(self) -> None: """Reset the activity() flag.""" self._activity = False - def get_stats(self) -> Optional[Dict[str, int]]: + def get_stats(self) -> dict[str, int] | None: """Return a dictionary of statistics, or None.""" return None diff --git a/coverage/report.py b/coverage/report.py index 93cc8fb4d..42f7b5aec 100644 --- a/coverage/report.py +++ b/coverage/report.py @@ -7,7 +7,7 @@ import sys -from typing import Any, IO, Iterable, List, Optional, Tuple, TYPE_CHECKING +from typing import Any, IO, Iterable, TYPE_CHECKING from coverage.exceptions import ConfigError, NoDataError from coverage.misc import human_sorted_items @@ -27,11 +27,11 @@ def __init__(self, coverage: Coverage) -> None: self.coverage = coverage self.config = self.coverage.config self.branches = coverage.get_data().has_arcs() - self.outfile: Optional[IO[str]] = None + self.outfile: IO[str] | None = None self.output_format = self.config.format or "text" if self.output_format not in {"text", "markdown", "total"}: raise ConfigError(f"Unknown report format choice: {self.output_format!r}") - self.fr_analysis: List[Tuple[FileReporter, Analysis]] = [] + self.fr_analysis: list[tuple[FileReporter, Analysis]] = 
[] self.skipped_count = 0 self.empty_count = 0 self.total = Numbers(precision=self.config.precision) @@ -48,10 +48,10 @@ def write_items(self, items: Iterable[str]) -> None: def _report_text( self, - header: List[str], - lines_values: List[List[Any]], - total_line: List[Any], - end_lines: List[str], + header: list[str], + lines_values: list[list[Any]], + total_line: list[Any], + end_lines: list[str], ) -> None: """Internal method that prints report data in text format. @@ -109,10 +109,10 @@ def _report_text( def _report_markdown( self, - header: List[str], - lines_values: List[List[Any]], - total_line: List[Any], - end_lines: List[str], + header: list[str], + lines_values: list[list[Any]], + total_line: list[Any], + end_lines: list[str], ) -> None: """Internal method that prints report data in markdown format. @@ -138,7 +138,7 @@ def _report_markdown( header_items = [formats[item].format(item, name_len=max_name, n=max_n) for item in header] header_str = "".join(header_items) rule_str = "|" + " ".join(["- |".rjust(len(header_items[0])-1, "-")] + - ["-: |".rjust(len(item)-1, "-") for item in header_items[1:]] + ["-: |".rjust(len(item)-1, "-") for item in header_items[1:]], ) # Write the header @@ -156,7 +156,7 @@ def _report_markdown( # Write the TOTAL line formats.update(dict(Name="|{:>{name_len}} |", Cover="{:>{n}} |")) - total_line_items: List[str] = [] + total_line_items: list[str] = [] for item, value in zip(header, total_line): if value == "": insert = value @@ -169,7 +169,7 @@ def _report_markdown( for end_line in end_lines: self.write(end_line) - def report(self, morfs: Optional[Iterable[TMorf]], outfile: Optional[IO[str]] = None) -> float: + def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str] | None = None) -> float: """Writes a report summarizing coverage statistics per module. `outfile` is a text-mode file object to write the summary to. 
@@ -252,7 +252,7 @@ def tabular_report(self) -> None: if self.config.skip_covered and self.skipped_count: file_suffix = "s" if self.skipped_count>1 else "" end_lines.append( - f"\n{self.skipped_count} file{file_suffix} skipped due to complete coverage." + f"\n{self.skipped_count} file{file_suffix} skipped due to complete coverage.", ) if self.config.skip_empty and self.empty_count: file_suffix = "s" if self.empty_count > 1 else "" diff --git a/coverage/report_core.py b/coverage/report_core.py index 1535bf8f7..f6bb1f373 100644 --- a/coverage/report_core.py +++ b/coverage/report_core.py @@ -8,7 +8,7 @@ import sys from typing import ( - Callable, Iterable, Iterator, IO, Optional, Protocol, Tuple, TYPE_CHECKING, + Callable, Iterable, Iterator, IO, Protocol, TYPE_CHECKING, ) from coverage.exceptions import NoDataError, NotPython @@ -27,14 +27,14 @@ class Reporter(Protocol): report_type: str - def report(self, morfs: Optional[Iterable[TMorf]], outfile: IO[str]) -> float: + def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str]) -> float: """Generate a report of `morfs`, written to `outfile`.""" def render_report( output_path: str, reporter: Reporter, - morfs: Optional[Iterable[TMorf]], + morfs: Iterable[TMorf] | None, msgfn: Callable[[str], None], ) -> float: """Run a one-file report generator, managing the output file. @@ -72,8 +72,8 @@ def render_report( def get_analysis_to_report( coverage: Coverage, - morfs: Optional[Iterable[TMorf]], -) -> Iterator[Tuple[FileReporter, Analysis]]: + morfs: Iterable[TMorf] | None, +) -> Iterator[tuple[FileReporter, Analysis]]: """Get the files to report on. 
For each morf in `morfs`, if it should be reported on (based on the omit diff --git a/coverage/results.py b/coverage/results.py index f5f9a3719..45cc4f198 100644 --- a/coverage/results.py +++ b/coverage/results.py @@ -7,7 +7,7 @@ import collections -from typing import Callable, Dict, Iterable, List, Optional, Tuple, TYPE_CHECKING +from typing import Callable, Iterable, TYPE_CHECKING from coverage.debug import auto_repr from coverage.exceptions import ConfigError @@ -86,18 +86,18 @@ def has_arcs(self) -> bool: """Were arcs measured in this result?""" return self.data.has_arcs() - def arc_possibilities(self) -> List[TArc]: + def arc_possibilities(self) -> list[TArc]: """Returns a sorted list of the arcs in the code.""" return self._arc_possibilities - def arcs_executed(self) -> List[TArc]: + def arcs_executed(self) -> list[TArc]: """Returns a sorted list of the arcs actually executed in the code.""" executed: Iterable[TArc] executed = self.data.arcs(self.filename) or [] executed = self.file_reporter.translate_arcs(executed) return sorted(executed) - def arcs_missing(self) -> List[TArc]: + def arcs_missing(self) -> list[TArc]: """Returns a sorted list of the un-executed arcs in the code.""" possible = self.arc_possibilities() executed = self.arcs_executed() @@ -109,7 +109,7 @@ def arcs_missing(self) -> List[TArc]: ) return sorted(missing) - def arcs_unpredicted(self) -> List[TArc]: + def arcs_unpredicted(self) -> list[TArc]: """Returns a sorted list of the executed arcs missing from the code.""" possible = self.arc_possibilities() executed = self.arcs_executed() @@ -126,7 +126,7 @@ def arcs_unpredicted(self) -> List[TArc]: ) return sorted(unpredicted) - def _branch_lines(self) -> List[TLineNo]: + def _branch_lines(self) -> list[TLineNo]: """Returns a list of line numbers that have more than one exit.""" return [l1 for l1,count in self.exit_counts.items() if count > 1] @@ -134,7 +134,7 @@ def _total_branches(self) -> int: """How many total branches are there?""" return 
sum(count for count in self.exit_counts.values() if count > 1) - def missing_branch_arcs(self) -> Dict[TLineNo, List[TLineNo]]: + def missing_branch_arcs(self) -> dict[TLineNo, list[TLineNo]]: """Return arcs that weren't executed from branch lines. Returns {l1:[l2a,l2b,...], ...} @@ -148,7 +148,7 @@ def missing_branch_arcs(self) -> Dict[TLineNo, List[TLineNo]]: mba[l1].append(l2) return mba - def executed_branch_arcs(self) -> Dict[TLineNo, List[TLineNo]]: + def executed_branch_arcs(self) -> dict[TLineNo, list[TLineNo]]: """Return arcs that were executed from branch lines. Returns {l1:[l2a,l2b,...], ...} @@ -162,7 +162,7 @@ def executed_branch_arcs(self) -> Dict[TLineNo, List[TLineNo]]: eba[l1].append(l2) return eba - def branch_stats(self) -> Dict[TLineNo, Tuple[int, int]]: + def branch_stats(self) -> dict[TLineNo, tuple[int, int]]: """Get stats about branches. Returns a dict mapping line numbers to a tuple: @@ -211,7 +211,7 @@ def __init__( __repr__ = auto_repr - def init_args(self) -> List[int]: + def init_args(self) -> list[int]: """Return a list for __init__(*args) to recreate this object.""" return [ self._precision, @@ -274,7 +274,7 @@ def pc_str_width(self) -> int: return width @property - def ratio_covered(self) -> Tuple[int, int]: + def ratio_covered(self) -> tuple[int, int]: """Return a numerator and denominator for the coverage ratio.""" numerator = self.n_executed + self.n_executed_branches denominator = self.n_statements + self.n_branches @@ -304,7 +304,7 @@ def __radd__(self, other: int) -> Numbers: def _line_ranges( statements: Iterable[TLineNo], lines: Iterable[TLineNo], -) -> List[Tuple[TLineNo, TLineNo]]: +) -> list[tuple[TLineNo, TLineNo]]: """Produce a list of ranges for `format_lines`.""" statements = sorted(statements) lines = sorted(lines) @@ -331,7 +331,7 @@ def _line_ranges( def format_lines( statements: Iterable[TLineNo], lines: Iterable[TLineNo], - arcs: Optional[Iterable[Tuple[TLineNo, List[TLineNo]]]] = None, + arcs: 
Iterable[tuple[TLineNo, list[TLineNo]]] | None = None, ) -> str: """Nicely format a list of line numbers. diff --git a/coverage/sqldata.py b/coverage/sqldata.py index ad3e1b97d..f12ccd7a9 100644 --- a/coverage/sqldata.py +++ b/coverage/sqldata.py @@ -21,8 +21,8 @@ import zlib from typing import ( - cast, Any, Collection, Dict, List, Mapping, - Optional, Sequence, Set, Tuple, Union, + cast, Any, Collection, Mapping, + Sequence, ) from coverage.debug import NoDebugging, auto_repr @@ -212,11 +212,11 @@ class CoverageData: def __init__( self, - basename: Optional[FilePath] = None, - suffix: Optional[Union[str, bool]] = None, + basename: FilePath | None = None, + suffix: str | bool | None = None, no_disk: bool = False, - warn: Optional[TWarnFn] = None, - debug: Optional[TDebugCtl] = None, + warn: TWarnFn | None = None, + debug: TDebugCtl | None = None, ) -> None: """Create a :class:`CoverageData` object to hold coverage-measured data. @@ -240,9 +240,9 @@ def __init__( self._choose_filename() # Maps filenames to row ids. - self._file_map: Dict[str, int] = {} + self._file_map: dict[str, int] = {} # Maps thread ids to SqliteDb objects. - self._dbs: Dict[int, SqliteDb] = {} + self._dbs: dict[int, SqliteDb] = {} self._pid = os.getpid() # Synchronize the operations used during collection. 
self._lock = threading.RLock() @@ -253,9 +253,9 @@ def __init__( self._has_lines = False self._has_arcs = False - self._current_context: Optional[str] = None - self._current_context_id: Optional[int] = None - self._query_context_ids: Optional[List[int]] = None + self._current_context: str | None = None + self._current_context_id: int | None = None + self._query_context_ids: list[int] | None = None __repr__ = auto_repr @@ -298,16 +298,16 @@ def _read_db(self) -> None: else: raise DataError( "Data file {!r} doesn't seem to be a coverage data file: {}".format( - self._filename, exc - ) + self._filename, exc, + ), ) from exc else: schema_version = row[0] if schema_version != SCHEMA_VERSION: raise DataError( "Couldn't use data file {!r}: wrong schema: {} instead of {}".format( - self._filename, schema_version, SCHEMA_VERSION - ) + self._filename, schema_version, SCHEMA_VERSION, + ), ) row = db.execute_one("select value from meta where key = 'has_arcs'") @@ -396,7 +396,7 @@ def loads(self, data: bytes) -> None: self._debug.write(f"Loading data into data file {self._filename!r}") if data[:1] != b"z": raise DataError( - f"Unrecognized serialization: {data[:40]!r} (head of {len(data)} bytes)" + f"Unrecognized serialization: {data[:40]!r} (head of {len(data)} bytes)", ) script = zlib.decompress(data[1:]).decode("utf-8") self._dbs[threading.get_ident()] = db = SqliteDb(self._filename, self._debug) @@ -405,7 +405,7 @@ def loads(self, data: bytes) -> None: self._read_db() self._have_used = True - def _file_id(self, filename: str, add: bool = False) -> Optional[int]: + def _file_id(self, filename: str, add: bool = False) -> int | None: """Get the file id for `filename`. If filename is not in the database yet, add it if `add` is True. 
@@ -416,11 +416,11 @@ def _file_id(self, filename: str, add: bool = False) -> Optional[int]: with self._connect() as con: self._file_map[filename] = con.execute_for_rowid( "insert or replace into file (path) values (?)", - (filename,) + (filename,), ) return self._file_map.get(filename) - def _context_id(self, context: str) -> Optional[int]: + def _context_id(self, context: str) -> int | None: """Get the id for a context.""" assert context is not None self._start_using() @@ -432,7 +432,7 @@ def _context_id(self, context: str) -> Optional[int]: return None @_locked - def set_context(self, context: Optional[str]) -> None: + def set_context(self, context: str | None) -> None: """Set the current context for future :meth:`add_lines` etc. `context` is a str, the name of the context to use for the next data @@ -456,7 +456,7 @@ def _set_context_id(self) -> None: with self._connect() as con: self._current_context_id = con.execute_for_rowid( "insert into context (context) values (?)", - (context,) + (context,), ) def base_filename(self) -> str: @@ -486,7 +486,7 @@ def add_lines(self, line_data: Mapping[str, Collection[TLineNo]]) -> None: """ if self._debug.should("dataop"): self._debug.write("Adding lines: %d files, %d lines total" % ( - len(line_data), sum(len(lines) for lines in line_data.values()) + len(line_data), sum(len(lines) for lines in line_data.values()), )) if self._debug.should("dataop2"): for filename, linenos in sorted(line_data.items()): @@ -524,7 +524,7 @@ def add_arcs(self, arc_data: Mapping[str, Collection[TArc]]) -> None: """ if self._debug.should("dataop"): self._debug.write("Adding arcs: %d files, %d arcs total" % ( - len(arc_data), sum(len(arcs) for arcs in arc_data.values()) + len(arc_data), sum(len(arcs) for arcs in arc_data.values()), )) if self._debug.should("dataop2"): for filename, arcs in sorted(arc_data.items()): @@ -564,7 +564,7 @@ def _choose_lines_or_arcs(self, lines: bool = False, arcs: bool = False) -> None with self._connect() as con: 
con.execute_void( "insert or ignore into meta (key, value) values (?, ?)", - ("has_arcs", str(int(arcs))) + ("has_arcs", str(int(arcs))), ) @_locked @@ -588,12 +588,12 @@ def add_file_tracers(self, file_tracers: Mapping[str, str]) -> None: raise DataError( "Conflicting file tracer name for '{}': {!r} vs {!r}".format( filename, existing_plugin, plugin_name, - ) + ), ) elif plugin_name: con.execute_void( "insert into tracer (file_id, tracer) values (?, ?)", - (file_id, plugin_name) + (file_id, plugin_name), ) def touch_file(self, filename: str, plugin_name: str = "") -> None: @@ -604,7 +604,7 @@ def touch_file(self, filename: str, plugin_name: str = "") -> None: """ self.touch_files([filename], plugin_name) - def touch_files(self, filenames: Collection[str], plugin_name: Optional[str] = None) -> None: + def touch_files(self, filenames: Collection[str], plugin_name: str | None = None) -> None: """Ensure that `filenames` appear in the data, empty if needed. `plugin_name` is the name of the plugin responsible for these files. @@ -647,7 +647,7 @@ def purge_files(self, filenames: Collection[str]) -> None: continue con.execute_void(sql, (file_id,)) - def update(self, other_data: CoverageData, aliases: Optional[PathAliases] = None) -> None: + def update(self, other_data: CoverageData, aliases: PathAliases | None = None) -> None: """Update this data with data from several other :class:`CoverageData` instances. 
If `aliases` is provided, it's a `PathAliases` object that is used to @@ -685,7 +685,7 @@ def update(self, other_data: CoverageData, aliases: Optional[PathAliases] = None "select file.path, context.context, arc.fromno, arc.tono " + "from arc " + "inner join file on file.id = arc.file_id " + - "inner join context on context.id = arc.context_id" + "inner join context on context.id = arc.context_id", ) as cur: arcs = [ (files[path], context, fromno, tono) @@ -697,9 +697,9 @@ def update(self, other_data: CoverageData, aliases: Optional[PathAliases] = None "select file.path, context.context, line_bits.numbits " + "from line_bits " + "inner join file on file.id = line_bits.file_id " + - "inner join context on context.id = line_bits.context_id" + "inner join context on context.id = line_bits.context_id", ) as cur: - lines: Dict[Tuple[str, str], bytes] = {} + lines: dict[tuple[str, str], bytes] = {} for path, context, numbits in cur: key = (files[path], context) if key in lines: @@ -710,7 +710,7 @@ def update(self, other_data: CoverageData, aliases: Optional[PathAliases] = None with con.execute( "select file.path, tracer " + "from tracer " + - "inner join file on file.id = tracer.file_id" + "inner join file on file.id = tracer.file_id", ) as cur: tracers = {files[path]: tracer for (path, tracer) in cur} @@ -726,7 +726,7 @@ def update(self, other_data: CoverageData, aliases: Optional[PathAliases] = None this_tracers = {path: "" for path, in cur} with con.execute( "select file.path, tracer from tracer " + - "inner join file on file.id = tracer.file_id" + "inner join file on file.id = tracer.file_id", ) as cur: this_tracers.update({ aliases.map(path): tracer @@ -736,14 +736,14 @@ def update(self, other_data: CoverageData, aliases: Optional[PathAliases] = None # Create all file and context rows in the DB. 
con.executemany_void( "insert or ignore into file (path) values (?)", - ((file,) for file in files.values()) + ((file,) for file in files.values()), ) with con.execute("select id, path from file") as cur: file_ids = {path: id for id, path in cur} self._file_map.update(file_ids) con.executemany_void( "insert or ignore into context (context) values (?)", - ((context,) for context in contexts) + ((context,) for context in contexts), ) with con.execute("select id, context from context") as cur: context_ids = {context: id for id, context in cur} @@ -759,8 +759,8 @@ def update(self, other_data: CoverageData, aliases: Optional[PathAliases] = None if this_tracer is not None and this_tracer != other_tracer: raise DataError( "Conflicting file tracer name for '{}': {!r} vs {!r}".format( - path, this_tracer, other_tracer - ) + path, this_tracer, other_tracer, + ), ) tracer_map[path] = other_tracer @@ -777,7 +777,7 @@ def update(self, other_data: CoverageData, aliases: Optional[PathAliases] = None "select file.path, context.context, line_bits.numbits " + "from line_bits " + "inner join file on file.id = line_bits.file_id " + - "inner join context on context.id = line_bits.context_id" + "inner join context on context.id = line_bits.context_id", ) as cur: for path, context, numbits in cur: key = (aliases.map(path), context) @@ -792,7 +792,7 @@ def update(self, other_data: CoverageData, aliases: Optional[PathAliases] = None con.executemany_void( "insert or ignore into arc " + "(file_id, context_id, fromno, tono) values (?, ?, ?, ?)", - arc_rows + arc_rows, ) if lines: @@ -804,11 +804,11 @@ def update(self, other_data: CoverageData, aliases: Optional[PathAliases] = None [ (file_ids[file], context_ids[context], numbits) for (file, context), numbits in lines.items() - ] + ], ) con.executemany_void( "insert or ignore into tracer (file_id, tracer) values (?, ?)", - ((file_ids[filename], tracer) for filename, tracer in tracer_map.items()) + ((file_ids[filename], tracer) for filename, 
tracer in tracer_map.items()), ) if not self._no_disk: @@ -863,7 +863,7 @@ def has_arcs(self) -> bool: """Does the database have arcs (True) or lines (False).""" return bool(self._has_arcs) - def measured_files(self) -> Set[str]: + def measured_files(self) -> set[str]: """A set of all files that have been measured. Note that a file may be mentioned as measured even though no lines or @@ -872,7 +872,7 @@ def measured_files(self) -> Set[str]: """ return set(self._file_map) - def measured_contexts(self) -> Set[str]: + def measured_contexts(self) -> set[str]: """A set of all contexts that have been measured. .. versionadded:: 5.0 @@ -884,7 +884,7 @@ def measured_contexts(self) -> Set[str]: contexts = {row[0] for row in cur} return contexts - def file_tracer(self, filename: str) -> Optional[str]: + def file_tracer(self, filename: str) -> str | None: """Get the plugin name of the file tracer for a file. Returns the name of the plugin that handles this file. If the file was @@ -918,7 +918,7 @@ def set_query_context(self, context: str) -> None: with con.execute("select id from context where context = ?", (context,)) as cur: self._query_context_ids = [row[0] for row in cur.fetchall()] - def set_query_contexts(self, contexts: Optional[Sequence[str]]) -> None: + def set_query_contexts(self, contexts: Sequence[str] | None) -> None: """Set a number of contexts for subsequent querying. The next :meth:`lines`, :meth:`arcs`, or :meth:`contexts_by_lineno` @@ -939,7 +939,7 @@ def set_query_contexts(self, contexts: Optional[Sequence[str]]) -> None: else: self._query_context_ids = None - def lines(self, filename: str) -> Optional[List[TLineNo]]: + def lines(self, filename: str) -> list[TLineNo] | None: """Get the list of lines executed for a source file. If the file was not measured, returns None. 
A file might be measured, @@ -974,7 +974,7 @@ def lines(self, filename: str) -> Optional[List[TLineNo]]: nums.update(numbits_to_nums(row[0])) return list(nums) - def arcs(self, filename: str) -> Optional[List[TArc]]: + def arcs(self, filename: str) -> list[TArc] | None: """Get the list of arcs executed for a file. If the file was not measured, returns None. A file might be measured, @@ -1006,7 +1006,7 @@ def arcs(self, filename: str) -> Optional[List[TArc]]: with con.execute(query, data) as cur: return list(cur) - def contexts_by_lineno(self, filename: str) -> Dict[TLineNo, List[str]]: + def contexts_by_lineno(self, filename: str) -> dict[TLineNo, list[str]]: """Get the contexts for each line in a file. Returns: @@ -1058,7 +1058,7 @@ def contexts_by_lineno(self, filename: str) -> Dict[TLineNo, List[str]]: return {lineno: list(contexts) for lineno, contexts in lineno_contexts_map.items()} @classmethod - def sys_info(cls) -> List[Tuple[str, Any]]: + def sys_info(cls) -> list[tuple[str, Any]]: """Our information for `Coverage.sys_info`. Returns a list of (key, value) pairs. @@ -1078,7 +1078,7 @@ def sys_info(cls) -> List[Tuple[str, Any]]: ] -def filename_suffix(suffix: Union[str, bool, None]) -> Union[str, None]: +def filename_suffix(suffix: str | bool | None) -> str | None: """Compute a filename suffix for a data file. If `suffix` is a string or None, simply return it. 
If `suffix` is True, diff --git a/coverage/sqlitedb.py b/coverage/sqlitedb.py index 468436bdd..0a3e83755 100644 --- a/coverage/sqlitedb.py +++ b/coverage/sqlitedb.py @@ -9,7 +9,7 @@ import re import sqlite3 -from typing import cast, Any, Iterable, Iterator, List, Optional, Tuple +from typing import cast, Any, Iterable, Iterator, Tuple from coverage.debug import auto_repr, clipped_repr, exc_one_line from coverage.exceptions import DataError @@ -32,7 +32,7 @@ def __init__(self, filename: str, debug: TDebugCtl) -> None: self.debug = debug self.filename = filename self.nest = 0 - self.con: Optional[sqlite3.Connection] = None + self.con: sqlite3.Connection | None = None __repr__ = auto_repr @@ -64,7 +64,7 @@ def _connect(self) -> None: if hasattr(sqlite3, "SQLITE_DBCONFIG_DEFENSIVE"): # Turn off defensive mode, so that journal_mode=off can succeed. self.con.setconfig( # type: ignore[attr-defined, unused-ignore] - sqlite3.SQLITE_DBCONFIG_DEFENSIVE, False + sqlite3.SQLITE_DBCONFIG_DEFENSIVE, False, ) # This pragma makes writing faster. It disables rollbacks, but we never need them. @@ -174,7 +174,7 @@ def execute_for_rowid(self, sql: str, parameters: Iterable[Any] = ()) -> int: self.debug.write(f"Row id result: {rowid!r}") return rowid - def execute_one(self, sql: str, parameters: Iterable[Any] = ()) -> Optional[Tuple[Any, ...]]: + def execute_one(self, sql: str, parameters: Iterable[Any] = ()) -> tuple[Any, ...] | None: """Execute a statement and return the one row that results. 
This is like execute(sql, parameters).fetchone(), except it is @@ -192,7 +192,7 @@ def execute_one(self, sql: str, parameters: Iterable[Any] = ()) -> Optional[Tupl else: raise AssertionError(f"SQL {sql!r} shouldn't return {len(rows)} rows") - def _executemany(self, sql: str, data: List[Any]) -> sqlite3.Cursor: + def _executemany(self, sql: str, data: list[Any]) -> sqlite3.Cursor: """Same as :meth:`python:sqlite3.Connection.executemany`.""" if self.debug.should("sql"): final = ":" if self.debug.should("sqldata") else "" diff --git a/coverage/sysmon.py b/coverage/sysmon.py index e6fe28fc3..65c5b6e77 100644 --- a/coverage/sysmon.py +++ b/coverage/sysmon.py @@ -5,7 +5,6 @@ from __future__ import annotations -import dataclasses import functools import inspect import os @@ -14,13 +13,11 @@ import threading import traceback +from dataclasses import dataclass from types import CodeType, FrameType from typing import ( Any, Callable, - Dict, - List, - Optional, Set, TYPE_CHECKING, cast, @@ -73,9 +70,9 @@ def _wrapped(*args: Any, **kwargs: Any) -> Any: assert sys_monitoring is not None short_stack = functools.partial( - short_stack, full=True, short_filenames=True, frame_ids=True + short_stack, full=True, short_filenames=True, frame_ids=True, ) - seen_threads: Set[int] = set() + seen_threads: set[int] = set() def log(msg: str) -> None: """Write a message to our detailed debugging log(s).""" @@ -108,7 +105,7 @@ def arg_repr(arg: Any) -> str: ) return repr(arg) - def panopticon(*names: Optional[str]) -> AnyCallable: + def panopticon(*names: str | None) -> AnyCallable: """Decorate a function to log its calls.""" def _decorator(method: AnyCallable) -> AnyCallable: @@ -145,7 +142,7 @@ def _wrapped(self: Any, *args: Any) -> Any: def log(msg: str) -> None: """Write a message to our detailed debugging log(s), but not really.""" - def panopticon(*names: Optional[str]) -> AnyCallable: + def panopticon(*names: str | None) -> AnyCallable: """Decorate a function to log its calls, but not 
really.""" def _decorator(meth: AnyCallable) -> AnyCallable: @@ -154,17 +151,17 @@ def _decorator(meth: AnyCallable) -> AnyCallable: return _decorator -@dataclasses.dataclass +@dataclass class CodeInfo: """The information we want about each code object.""" tracing: bool - file_data: Optional[TTraceFileData] + file_data: TTraceFileData | None # TODO: what is byte_to_line for? - byte_to_line: Dict[int, int] | None + byte_to_line: dict[int, int] | None -def bytes_to_lines(code: CodeType) -> Dict[int, int]: +def bytes_to_lines(code: CodeType) -> dict[int, int]: """Make a dict mapping byte code offsets to line numbers.""" b2l = {} for bstart, bend, lineno in code.co_lines(): @@ -184,24 +181,24 @@ def __init__(self, tool_id: int) -> None: self.data: TTraceData self.trace_arcs = False self.should_trace: Callable[[str, FrameType], TFileDisposition] - self.should_trace_cache: Dict[str, Optional[TFileDisposition]] + self.should_trace_cache: dict[str, TFileDisposition | None] # TODO: should_start_context and switch_context are unused! # Change tests/testenv.py:DYN_CONTEXTS when this is updated. - self.should_start_context: Optional[Callable[[FrameType], Optional[str]]] = None - self.switch_context: Optional[Callable[[Optional[str]], None]] = None + self.should_start_context: Callable[[FrameType], str | None] | None = None + self.switch_context: Callable[[str | None], None] | None = None # TODO: warn is unused. self.warn: TWarnFn self.myid = tool_id # Map id(code_object) -> CodeInfo - self.code_infos: Dict[int, CodeInfo] = {} + self.code_infos: dict[int, CodeInfo] = {} # A list of code_objects, just to keep them alive so that id's are # useful as identity. 
- self.code_objects: List[CodeType] = [] - self.last_lines: Dict[FrameType, int] = {} + self.code_objects: list[CodeType] = [] + self.last_lines: dict[FrameType, int] = {} # Map id(code_object) -> code_object - self.local_event_codes: Dict[int, CodeType] = {} + self.local_event_codes: dict[int, CodeType] = {} self.sysmon_on = False self.stats = { @@ -270,7 +267,7 @@ def reset_activity(self) -> None: """Reset the activity() flag.""" self._activity = False - def get_stats(self) -> Optional[Dict[str, int]]: + def get_stats(self) -> dict[str, int] | None: """Return a dictionary of statistics, or None.""" return None @@ -359,7 +356,7 @@ def sysmon_py_start(self, code: CodeType, instruction_offset: int) -> MonitorRet @panopticon("code", "@") def sysmon_py_resume_arcs( - self, code: CodeType, instruction_offset: int + self, code: CodeType, instruction_offset: int, ) -> MonitorReturn: """Handle sys.monitoring.events.PY_RESUME events for branch coverage.""" frame = self.callers_frame() @@ -367,7 +364,7 @@ def sysmon_py_resume_arcs( @panopticon("code", "@", None) def sysmon_py_return_arcs( - self, code: CodeType, instruction_offset: int, retval: object + self, code: CodeType, instruction_offset: int, retval: object, ) -> MonitorReturn: """Handle sys.monitoring.events.PY_RETURN events for branch coverage.""" frame = self.callers_frame() @@ -384,7 +381,7 @@ def sysmon_py_return_arcs( @panopticon("code", "@", "exc") def sysmon_py_unwind_arcs( - self, code: CodeType, instruction_offset: int, exception: BaseException + self, code: CodeType, instruction_offset: int, exception: BaseException, ) -> MonitorReturn: """Handle sys.monitoring.events.PY_UNWIND events for branch coverage.""" frame = self.callers_frame() diff --git a/coverage/templite.py b/coverage/templite.py index 11ea847be..4e7491220 100644 --- a/coverage/templite.py +++ b/coverage/templite.py @@ -15,7 +15,7 @@ import re from typing import ( - Any, Callable, Dict, List, NoReturn, Optional, Set, Union, cast, + Any, 
Callable, Dict, NoReturn, cast, ) @@ -33,7 +33,7 @@ class CodeBuilder: """Build source code conveniently.""" def __init__(self, indent: int = 0) -> None: - self.code: List[Union[str, CodeBuilder]] = [] + self.code: list[str | CodeBuilder] = [] self.indent_level = indent def __str__(self) -> str: @@ -63,14 +63,14 @@ def dedent(self) -> None: """Decrease the current indent for following lines.""" self.indent_level -= self.INDENT_STEP - def get_globals(self) -> Dict[str, Any]: + def get_globals(self) -> dict[str, Any]: """Execute the code, and return a dict of globals it defines.""" # A check that the caller really finished all the blocks they started. assert self.indent_level == 0 # Get the Python source as a single string. python_source = str(self) # Execute the source, defining globals, and return them. - global_namespace: Dict[str, Any] = {} + global_namespace: dict[str, Any] = {} exec(python_source, global_namespace) return global_namespace @@ -117,7 +117,7 @@ class Templite: }) """ - def __init__(self, text: str, *contexts: Dict[str, Any]) -> None: + def __init__(self, text: str, *contexts: dict[str, Any]) -> None: """Construct a Templite with the given `text`. `contexts` are dictionaries of values to use for future renderings. @@ -128,8 +128,8 @@ def __init__(self, text: str, *contexts: Dict[str, Any]) -> None: for context in contexts: self.context.update(context) - self.all_vars: Set[str] = set() - self.loop_vars: Set[str] = set() + self.all_vars: set[str] = set() + self.loop_vars: set[str] = set() # We construct a function in source form, then compile it and hold onto # it, and execute it to render the template. 
@@ -143,7 +143,7 @@ def __init__(self, text: str, *contexts: Dict[str, Any]) -> None: code.add_line("extend_result = result.extend") code.add_line("to_str = str") - buffered: List[str] = [] + buffered: list[str] = [] def flush_output() -> None: """Force `buffered` to the code builder.""" @@ -194,10 +194,7 @@ def flush_output() -> None: ops_stack.append("for") self._variable(words[1], self.loop_vars) code.add_line( - "for c_{} in {}:".format( - words[1], - self._expr_code(words[3]) - ) + f"for c_{words[1]} in {self._expr_code(words[3])}:", ) code.indent() elif words[0] == "joined": @@ -241,7 +238,7 @@ def flush_output() -> None: self._render_function = cast( Callable[ [Dict[str, Any], Callable[..., Any]], - str + str, ], code.get_globals()["render_function"], ) @@ -268,7 +265,7 @@ def _syntax_error(self, msg: str, thing: Any) -> NoReturn: """Raise a syntax error using `msg`, and showing `thing`.""" raise TempliteSyntaxError(f"{msg}: {thing!r}") - def _variable(self, name: str, vars_set: Set[str]) -> None: + def _variable(self, name: str, vars_set: set[str]) -> None: """Track that `name` is used as a variable. Adds the name to `vars_set`, a set of variable names. @@ -280,7 +277,7 @@ def _variable(self, name: str, vars_set: Set[str]) -> None: self._syntax_error("Not a valid name", name) vars_set.add(name) - def render(self, context: Optional[Dict[str, Any]] = None) -> str: + def render(self, context: dict[str, Any] | None = None) -> str: """Render this template by applying it to `context`. `context` is a dictionary of values to use in this rendering. 
@@ -302,7 +299,7 @@ def _do_dots(self, value: Any, *dots: str) -> Any: value = value[dot] except (TypeError, KeyError) as exc: raise TempliteValueError( - f"Couldn't evaluate {value!r}.{dot}" + f"Couldn't evaluate {value!r}.{dot}", ) from exc if callable(value): value = value() diff --git a/coverage/tomlconfig.py b/coverage/tomlconfig.py index 139cb2c1b..1ba282d08 100644 --- a/coverage/tomlconfig.py +++ b/coverage/tomlconfig.py @@ -8,7 +8,7 @@ import os import re -from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Type, TypeVar +from typing import Any, Callable, Iterable, TypeVar from coverage import env from coverage.exceptions import ConfigError @@ -40,9 +40,9 @@ class TomlConfigParser: def __init__(self, our_file: bool) -> None: self.our_file = our_file - self.data: Dict[str, Any] = {} + self.data: dict[str, Any] = {} - def read(self, filenames: Iterable[str]) -> List[str]: + def read(self, filenames: Iterable[str]) -> list[str]: # RawConfigParser takes a filename or list of filenames, but we only # ever call this with a single filename. assert isinstance(filenames, (bytes, str, os.PathLike)) @@ -67,7 +67,7 @@ def read(self, filenames: Iterable[str]) -> List[str]: raise ConfigError(msg.format(filename)) return [] - def _get_section(self, section: str) -> Tuple[Optional[str], Optional[TConfigSectionOut]]: + def _get_section(self, section: str) -> tuple[str | None, TConfigSectionOut | None]: """Get a section from the data. 
Arguments: @@ -94,7 +94,7 @@ def _get_section(self, section: str) -> Tuple[Optional[str], Optional[TConfigSec return None, None return real_section, data - def _get(self, section: str, option: str) -> Tuple[str, TConfigValueOut]: + def _get(self, section: str, option: str) -> tuple[str, TConfigValueOut]: """Like .get, but returns the real section name and the value.""" name, data = self._get_section(section) if data is None: @@ -123,7 +123,7 @@ def has_option(self, section: str, option: str) -> bool: return False return option in data - def real_section(self, section: str) -> Optional[str]: + def real_section(self, section: str) -> str | None: name, _ = self._get_section(section) return name @@ -131,7 +131,7 @@ def has_section(self, section: str) -> bool: name, _ = self._get_section(section) return bool(name) - def options(self, section: str) -> List[str]: + def options(self, section: str) -> list[str]: _, data = self._get_section(section) if data is None: raise ConfigError(f"No section: {section!r}") @@ -150,8 +150,8 @@ def _check_type( section: str, option: str, value: Any, - type_: Type[TWant], - converter: Optional[Callable[[Any], TWant]], + type_: type[TWant], + converter: Callable[[Any], TWant] | None, type_desc: str, ) -> TWant: """Check that `value` has the type we want, converting if needed. 
@@ -165,10 +165,10 @@ def _check_type( return converter(value) except Exception as e: raise ValueError( - f"Option [{section}]{option} couldn't convert to {type_desc}: {value!r}" + f"Option [{section}]{option} couldn't convert to {type_desc}: {value!r}", ) from e raise ValueError( - f"Option [{section}]{option} is not {type_desc}: {value!r}" + f"Option [{section}]{option} is not {type_desc}: {value!r}", ) def getboolean(self, section: str, option: str) -> bool: @@ -176,18 +176,18 @@ def getboolean(self, section: str, option: str) -> bool: bool_strings = {"true": True, "false": False} return self._check_type(name, option, value, bool, bool_strings.__getitem__, "a boolean") - def _get_list(self, section: str, option: str) -> Tuple[str, List[str]]: + def _get_list(self, section: str, option: str) -> tuple[str, list[str]]: """Get a list of strings, substituting environment variables in the elements.""" name, values = self._get(section, option) values = self._check_type(name, option, values, list, None, "a list") values = [substitute_variables(value, os.environ) for value in values] return name, values - def getlist(self, section: str, option: str) -> List[str]: + def getlist(self, section: str, option: str) -> list[str]: _, values = self._get_list(section, option) return values - def getregexlist(self, section: str, option: str) -> List[str]: + def getregexlist(self, section: str, option: str) -> list[str]: name, values = self._get_list(section, option) for value in values: value = value.strip() diff --git a/coverage/tracer.pyi b/coverage/tracer.pyi index 14372d1e3..c5f1c2840 100644 --- a/coverage/tracer.pyi +++ b/coverage/tracer.pyi @@ -1,11 +1,14 @@ # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt +"""Typing information for the constructs from our .c files.""" + from typing import Any, Dict from coverage.types import TFileDisposition, TTraceData, TTraceFn, 
TracerCore class CFileDisposition(TFileDisposition): + """CFileDisposition is in ctracer/filedisp.c""" canonical_filename: Any file_tracer: Any has_dynamic_filename: Any @@ -16,6 +19,7 @@ class CFileDisposition(TFileDisposition): def __init__(self) -> None: ... class CTracer(TracerCore): + """CTracer is in ctracer/tracer.c""" check_include: Any concur_id_func: Any data: TTraceData diff --git a/coverage/types.py b/coverage/types.py index b39798573..d2e0bb965 100644 --- a/coverage/types.py +++ b/coverage/types.py @@ -43,8 +43,8 @@ def __call__( frame: FrameType, event: str, arg: Any, - lineno: Optional[TLineNo] = None # Our own twist, see collector.py - ) -> Optional[TTraceFn]: + lineno: TLineNo | None = None, # Our own twist, see collector.py + ) -> TTraceFn | None: ... ## Coverage.py tracing @@ -59,10 +59,10 @@ class TFileDisposition(Protocol): original_filename: str canonical_filename: str - source_filename: Optional[str] + source_filename: str | None trace: bool reason: str - file_tracer: Optional[FileTracer] + file_tracer: FileTracer | None has_dynamic_filename: bool @@ -84,9 +84,9 @@ class TracerCore(Protocol): data: TTraceData trace_arcs: bool should_trace: Callable[[str, FrameType], TFileDisposition] - should_trace_cache: Mapping[str, Optional[TFileDisposition]] - should_start_context: Optional[Callable[[FrameType], Optional[str]]] - switch_context: Optional[Callable[[Optional[str]], None]] + should_trace_cache: Mapping[str, TFileDisposition | None] + should_start_context: Callable[[FrameType], str | None] | None + switch_context: Callable[[str | None], None] | None warn: TWarnFn def __init__(self) -> None: @@ -104,7 +104,7 @@ def activity(self) -> bool: def reset_activity(self) -> None: """Reset the activity() flag.""" - def get_stats(self) -> Optional[Dict[str, int]]: + def get_stats(self) -> dict[str, int] | None: """Return a dictionary of statistics, or None.""" @@ -126,7 +126,7 @@ def get_stats(self) -> Optional[Dict[str, int]]: class 
TConfigurable(Protocol): """Something that can proxy to the coverage configuration settings.""" - def get_option(self, option_name: str) -> Optional[TConfigValueOut]: + def get_option(self, option_name: str) -> TConfigValueOut | None: """Get an option from the configuration. `option_name` is a colon-separated string indicating the section and @@ -137,7 +137,7 @@ def get_option(self, option_name: str) -> Optional[TConfigValueOut]: """ - def set_option(self, option_name: str, value: Union[TConfigValueIn, TConfigSectionIn]) -> None: + def set_option(self, option_name: str, value: TConfigValueIn | TConfigSectionIn) -> None: """Set an option in the configuration. `option_name` is a colon-separated string indicating the section and @@ -173,7 +173,7 @@ class TPlugin(Protocol): class TWarnFn(Protocol): """A callable warn() function.""" - def __call__(self, msg: str, slug: Optional[str] = None, once: bool = False) -> None: + def __call__(self, msg: str, slug: str | None = None, once: bool = False) -> None: ... diff --git a/coverage/version.py b/coverage/version.py index 73db1316a..10f4115ef 100644 --- a/coverage/version.py +++ b/coverage/version.py @@ -8,7 +8,7 @@ # version_info: same semantics as sys.version_info. # _dev: the .devN suffix if any. 
-version_info = (7, 4, 3, "final", 0) +version_info = (7, 4, 4, "final", 0) _dev = 0 diff --git a/coverage/xmlreport.py b/coverage/xmlreport.py index 819b4c6bc..b346a2d78 100644 --- a/coverage/xmlreport.py +++ b/coverage/xmlreport.py @@ -12,7 +12,7 @@ import xml.dom.minidom from dataclasses import dataclass -from typing import Any, Dict, IO, Iterable, Optional, TYPE_CHECKING +from typing import Any, IO, Iterable, TYPE_CHECKING from coverage import __version__, files from coverage.misc import isolate_module, human_sorted, human_sorted_items @@ -42,7 +42,7 @@ def rate(hit: int, num: int) -> str: @dataclass class PackageData: """Data we keep about each "package" (in Java terms).""" - elements: Dict[str, xml.dom.minidom.Element] + elements: dict[str, xml.dom.minidom.Element] hits: int lines: int br_hits: int @@ -72,10 +72,10 @@ def __init__(self, coverage: Coverage) -> None: else: src = files.canonical_filename(src) self.source_paths.add(src) - self.packages: Dict[str, PackageData] = {} + self.packages: dict[str, PackageData] = {} self.xml_out: xml.dom.minidom.Document - def report(self, morfs: Optional[Iterable[TMorf]], outfile: Optional[IO[str]] = None) -> float: + def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str] | None = None) -> float: """Generate a Cobertura-compatible XML report for `morfs`. `morfs` is a list of modules or file names. 
@@ -97,7 +97,7 @@ def report(self, morfs: Optional[Iterable[TMorf]], outfile: Optional[IO[str]] = xcoverage.setAttribute("version", __version__) xcoverage.setAttribute("timestamp", str(int(time.time()*1000))) xcoverage.appendChild(self.xml_out.createComment( - f" Generated by coverage.py: {__url__} " + f" Generated by coverage.py: {__url__} ", )) xcoverage.appendChild(self.xml_out.createComment(f" Based on {DTD_URL} ")) @@ -222,7 +222,7 @@ def xml_file(self, fr: FileReporter, analysis: Analysis, has_arcs: bool) -> None xline.setAttribute("branch", "true") xline.setAttribute( "condition-coverage", - "%d%% (%d/%d)" % (100*taken//total, taken, total) + "%d%% (%d/%d)" % (100*taken//total, taken, total), ) if line in missing_branch_arcs: annlines = ["exit" if b < 0 else str(b) for b in missing_branch_arcs[line]] diff --git a/doc/conf.py b/doc/conf.py index 4e5150973..5bdcca17b 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -67,14 +67,14 @@ # @@@ editable copyright = "2009–2024, Ned Batchelder" # pylint: disable=redefined-builtin # The short X.Y.Z version. -version = "7.4.3" +version = "7.4.4" # The full version, including alpha/beta/rc tags. -release = "7.4.3" +release = "7.4.4" # The date of release, in "monthname day, year" format. -release_date = "February 23, 2024" +release_date = "March 14, 2024" # @@@ end -rst_epilog = """ +rst_epilog = f""" .. |release_date| replace:: {release_date} .. |coverage-equals-release| replace:: coverage=={release} .. |doc-url| replace:: https://coverage.readthedocs.io/en/{release} @@ -82,7 +82,7 @@
-""".format(release=release, release_date=release_date) +""" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/doc/config.rst b/doc/config.rst index 540ec780a..4485145c0 100644 --- a/doc/config.rst +++ b/doc/config.rst @@ -627,6 +627,19 @@ use of the decimal places. A setting of 100 will fail any value under 100, regardless of the number of decimal places of precision. +.. _config_report_format: + +[report] format +............... + +(string, default "text") The format to use for the textual report. The default +is "text" which produces a simple textual table. You can use "markdown" to +produce a Markdown table, or "total" to output only the total coverage +percentage. + +.. versionadded:: 7.0 + + .. _config_report_ignore_errors: [report] ignore_errors diff --git a/doc/requirements.in b/doc/requirements.in index 6d1bd330f..3b00a4082 100644 --- a/doc/requirements.in +++ b/doc/requirements.in @@ -7,14 +7,12 @@ -c ../requirements/pins.pip cogapp +doc8 +pyenchant scriv # for writing GitHub releases sphinx sphinx-autobuild sphinx_rtd_theme sphinx-code-tabs sphinxcontrib-restbuilder - -# These aren't compatible atm with other library versions: -#doc8 -#pyenchant -#sphinxcontrib-spelling +sphinxcontrib-spelling diff --git a/doc/requirements.pip b/doc/requirements.pip index ec756bc80..78f27e095 100644 --- a/doc/requirements.pip +++ b/doc/requirements.pip @@ -4,57 +4,75 @@ # # make doc_upgrade # -alabaster==0.7.13 +alabaster==0.7.16 # via sphinx -attrs==23.1.0 +attrs==23.2.0 # via scriv -babel==2.12.1 +babel==2.14.0 # via sphinx -certifi==2023.5.7 +certifi==2024.2.2 # via requests -charset-normalizer==3.1.0 +charset-normalizer==3.3.2 # via requests -click==8.1.3 +click==8.1.7 # via # click-log # scriv click-log==0.4.0 # via scriv -cogapp==3.3.0 +cogapp==3.4.1 # via -r doc/requirements.in colorama==0.4.6 # via sphinx-autobuild -docutils==0.18.1 +doc8==1.1.1 + # via -r doc/requirements.in 
+docutils==0.20.1 # via + # doc8 + # restructuredtext-lint # sphinx # sphinx-rtd-theme -idna==3.4 +idna==3.6 # via requests imagesize==1.4.1 # via sphinx -jinja2==3.1.2 +jinja2==3.1.3 # via # scriv # sphinx livereload==2.6.3 # via sphinx-autobuild -markupsafe==2.1.3 +markdown-it-py==3.0.0 + # via scriv +markupsafe==2.1.5 # via jinja2 -packaging==23.1 - # via sphinx -pygments==2.15.1 +mdurl==0.1.2 + # via markdown-it-py +packaging==24.0 # via sphinx +pbr==6.0.0 + # via stevedore +pyenchant==3.2.2 + # via + # -r doc/requirements.in + # sphinxcontrib-spelling +pygments==2.17.2 + # via + # doc8 + # sphinx requests==2.31.0 # via # scriv # sphinx -scriv==1.3.1 +restructuredtext-lint==1.4.0 + # via doc8 +scriv==1.5.1 # via -r doc/requirements.in six==1.16.0 # via livereload snowballstemmer==2.2.0 # via sphinx -sphinx==6.2.1 +sphinx==7.2.6 # via # -r doc/requirements.in # sphinx-autobuild @@ -62,29 +80,34 @@ sphinx==6.2.1 # sphinx-rtd-theme # sphinxcontrib-jquery # sphinxcontrib-restbuilder -sphinx-autobuild==2021.3.14 + # sphinxcontrib-spelling +sphinx-autobuild==2024.2.4 # via -r doc/requirements.in -sphinx-code-tabs==0.5.3 +sphinx-code-tabs==0.5.5 # via -r doc/requirements.in -sphinx-rtd-theme==1.2.2 +sphinx-rtd-theme==2.0.0 # via -r doc/requirements.in -sphinxcontrib-applehelp==1.0.4 +sphinxcontrib-applehelp==1.0.8 # via sphinx -sphinxcontrib-devhelp==1.0.2 +sphinxcontrib-devhelp==1.0.6 # via sphinx -sphinxcontrib-htmlhelp==2.0.1 +sphinxcontrib-htmlhelp==2.0.5 # via sphinx sphinxcontrib-jquery==4.1 # via sphinx-rtd-theme sphinxcontrib-jsmath==1.0.1 # via sphinx -sphinxcontrib-qthelp==1.0.3 +sphinxcontrib-qthelp==1.0.7 # via sphinx sphinxcontrib-restbuilder==0.3 # via -r doc/requirements.in -sphinxcontrib-serializinghtml==1.1.5 +sphinxcontrib-serializinghtml==1.1.10 # via sphinx -tornado==6.3.2 +sphinxcontrib-spelling==8.0.0 + # via -r doc/requirements.in +stevedore==5.2.0 + # via doc8 +tornado==6.4 # via livereload -urllib3==2.0.3 +urllib3==2.2.1 # via requests diff 
--git a/doc/sample_html/d_7b071bdc2a35fa80___init___py.html b/doc/sample_html/d_7b071bdc2a35fa80___init___py.html index 8ecdb82c7..5e04da820 100644 --- a/doc/sample_html/d_7b071bdc2a35fa80___init___py.html +++ b/doc/sample_html/d_7b071bdc2a35fa80___init___py.html @@ -66,8 +66,8 @@

^ index     » next       - coverage.py v7.4.3, - created at 2024-02-23 15:25 -0500 + coverage.py v7.4.4, + created at 2024-03-14 14:39 -0400