From bec9629b88975e36be91bd496c03d1927ab99949 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Sat, 16 Aug 2025 20:27:41 -0400 Subject: [PATCH 01/43] build: bump version to 7.10.5 --- CHANGES.rst | 6 ++++++ coverage/version.py | 4 ++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 64581b7ac..a16bc3fa7 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -20,6 +20,12 @@ upgrading your version of coverage.py. .. Version 9.8.1 — 2027-07-27 .. -------------------------- +Unreleased +---------- + +Nothing yet. + + .. start-releases .. _changes_7-10-4: diff --git a/coverage/version.py b/coverage/version.py index aaaf26131..ce20ac49b 100644 --- a/coverage/version.py +++ b/coverage/version.py @@ -8,8 +8,8 @@ # version_info: same semantics as sys.version_info. # _dev: the .devN suffix if any. -version_info = (7, 10, 4, "final", 0) -_dev = 0 +version_info = (7, 10, 5, "alpha", 0) +_dev = 1 def _make_version( From 1c479c60e9132068ab8f698f55b6620a00114976 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 17 Aug 2025 09:54:55 -0400 Subject: [PATCH 02/43] chore: bump the action-dependencies group with 3 updates (#2031) Bumps the action-dependencies group with 3 updates: [actions/checkout](https://github.com/actions/checkout), [github/codeql-action](https://github.com/github/codeql-action) and [astral-sh/setup-uv](https://github.com/astral-sh/setup-uv). Updates `actions/checkout` from 4.2.2 to 5.0.0 - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/11bd71901bbe5b1630ceea73d27597364c9af683...08c6903cd8c0fde910a37f88322edcfb5dd907a8) Updates `github/codeql-action` from 3.29.8 to 3.29.9 - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/76621b61decf072c1cee8dd1ce2d2a82d33c17ed...df559355d593797519d70b90fc8edd5db049e7a2) Updates `astral-sh/setup-uv` from 6.4.3 to 6.5.0 - [Release notes](https://github.com/astral-sh/setup-uv/releases) - [Commits](https://github.com/astral-sh/setup-uv/compare/e92bafb6253dcd438e0484186d7669ea7a8ca1cc...d9e0f98d3fc6adb07d1e3d37f3043649ddad06a1) --- updated-dependencies: - dependency-name: actions/checkout dependency-version: 5.0.0 dependency-type: direct:production update-type: version-update:semver-major dependency-group: action-dependencies - dependency-name: github/codeql-action dependency-version: 3.29.9 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: action-dependencies - dependency-name: astral-sh/setup-uv dependency-version: 6.5.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: action-dependencies ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 8 ++++---- .github/workflows/coverage.yml | 6 +++--- .github/workflows/dependency-review.yml | 2 +- .github/workflows/kit.yml | 4 ++-- .github/workflows/python-nightly.yml | 2 +- .github/workflows/quality.yml | 12 ++++++------ .github/workflows/testsuite.yml | 4 ++-- 7 files changed, 19 insertions(+), 19 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 6e85e4a91..a2172ab84 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -45,13 +45,13 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: persist-credentials: false # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3 + uses: github/codeql-action/init@df559355d593797519d70b90fc8edd5db049e7a2 # v3 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -62,7 +62,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3 + uses: github/codeql-action/autobuild@df559355d593797519d70b90fc8edd5db049e7a2 # v3 # â„šī¸ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl @@ -76,4 +76,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3 + uses: github/codeql-action/analyze@df559355d593797519d70b90fc8edd5db049e7a2 # v3 diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 0d4282982..5ccd7c737 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -39,7 +39,7 @@ jobs: workflow: ${{ steps.filter.outputs.workflow }} steps: - name: "Check out the repo" - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: persist-credentials: false @@ -122,7 +122,7 @@ jobs: steps: - name: "Check out the repo" - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: persist-credentials: false @@ -183,7 +183,7 @@ jobs: steps: - name: "Check out the repo" - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: persist-credentials: false diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index 070b5ff07..99c70bbb9 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -24,7 +24,7 @@ jobs: runs-on: ubuntu-latest steps: - name: 'Checkout Repository' - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: persist-credentials: false diff --git a/.github/workflows/kit.yml b/.github/workflows/kit.yml index 6ee26b671..a271fb1c6 100644 --- 
a/.github/workflows/kit.yml +++ b/.github/workflows/kit.yml @@ -167,7 +167,7 @@ jobs: steps: - name: "Check out the repo" - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: persist-credentials: false @@ -224,7 +224,7 @@ jobs: runs-on: ubuntu-latest steps: - name: "Check out the repo" - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: persist-credentials: false diff --git a/.github/workflows/python-nightly.yml b/.github/workflows/python-nightly.yml index e8b748ca8..9879edc5d 100644 --- a/.github/workflows/python-nightly.yml +++ b/.github/workflows/python-nightly.yml @@ -59,7 +59,7 @@ jobs: steps: - name: "Check out the repo" - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: persist-credentials: false diff --git a/.github/workflows/quality.yml b/.github/workflows/quality.yml index cf3842176..67c7608ef 100644 --- a/.github/workflows/quality.yml +++ b/.github/workflows/quality.yml @@ -38,7 +38,7 @@ jobs: workflow: ${{ steps.filter.outputs.workflow }} steps: - name: "Check out the repo" - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: persist-credentials: false @@ -73,7 +73,7 @@ jobs: steps: - name: "Check out the repo" - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: persist-credentials: false @@ -101,7 +101,7 @@ jobs: steps: - name: "Check out the repo" - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: persist-credentials: false @@ -129,7 +129,7 @@ jobs: steps: - name: "Check out the repo" - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: persist-credentials: false @@ -168,12 +168,12 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: persist-credentials: false - name: Install the latest version of uv - uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc #v6.4.3 + uses: astral-sh/setup-uv@d9e0f98d3fc6adb07d1e3d37f3043649ddad06a1 #v6.5.0 with: enable-cache: false diff --git a/.github/workflows/testsuite.yml b/.github/workflows/testsuite.yml index 6c63165f2..b53364ecc 100644 --- a/.github/workflows/testsuite.yml +++ b/.github/workflows/testsuite.yml @@ -37,7 +37,7 @@ jobs: run_tests: ${{ steps.filter.outputs.run_tests }} steps: - name: "Check out the repo" - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: persist-credentials: false @@ -109,7 +109,7 @@ jobs: steps: - name: "Check out the repo" - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: persist-credentials: false From 2e9a18dfa28bdf1415c2327a2a22cf7ee113a8e2 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Sun, 17 Aug 2025 09:57:35 -0400 
Subject: [PATCH 03/43] build: silence a warning we don't care about --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 47628b035..d1e55f1c0 100644 --- a/Makefile +++ b/Makefile @@ -132,7 +132,7 @@ _upgrade: $(DOCBIN) $(KITBIN) $(PIP_COMPILE) -o requirements/light-threads.pip requirements/light-threads.in $(PIP_COMPILE) -o requirements/mypy.pip requirements/mypy.in $(PIP_COMPILE) -p $(DOCBIN)/python3 -o doc/requirements.pip doc/requirements.in - pre-commit autoupdate + PYTHONWARNDEFAULTENCODING= pre-commit autoupdate diff_upgrade: ## Summarize the last `make upgrade`. @# The sort flags sort by the package name first, then by the -/+, and From aae0ad9447fb170819353a9f54a0f2f45889203d Mon Sep 17 00:00:00 2001 From: Alex Gaynor Date: Sun, 17 Aug 2025 12:50:07 -0400 Subject: [PATCH 04/43] perf: small optimizations to combine (#2032) a) Avoid copying lists in executemany_void: while some callers provide an iterator, others were already providing a list and in that case we had to copy it b) Avoid unpacking and re-packing contexts In total this shaves about 10% of the combine time in pyca/cryptography's CI --- coverage/sqldata.py | 12 ++++++------ coverage/sqlitedb.py | 3 +-- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/coverage/sqldata.py b/coverage/sqldata.py index 857ac24a7..6b3f0dbff 100644 --- a/coverage/sqldata.py +++ b/coverage/sqldata.py @@ -685,7 +685,7 @@ def update( # Get contexts data. with con.execute("select context from context") as cur: - contexts = [context for (context,) in cur] + contexts = cur.fetchall() # Get arc data. with con.execute( @@ -743,14 +743,14 @@ def update( # Create all file and context rows in the DB. con.executemany_void( "insert or ignore into file (path) values (?)", - ((file,) for file in files.values()), + [(file,) for file in files.values()], ) with con.execute("select id, path from file") as cur: file_ids = {path: id for id, path in cur} self._file_map.update(file_ids) con.executemany_void( "insert or ignore into context (context) values (?)", - ((context,) for context in contexts), + contexts, ) with con.execute("select id, context from context") as cur: context_ids = {context: id for id, context in cur} @@ -778,10 +778,10 @@ def update( if arcs: self._choose_lines_or_arcs(arcs=True) - arc_rows = ( + arc_rows = [ (file_ids[file], context_ids[context], fromno, tono) for file, context, fromno, tono in arcs - ) + ] # Write the combined data. 
con.executemany_void( @@ -813,7 +813,7 @@ def update( con.executemany_void( "insert or ignore into tracer (file_id, tracer) values (?, ?)", - ((file_ids[filename], tracer) for filename, tracer in tracer_map.items()), + [(file_ids[filename], tracer) for filename, tracer in tracer_map.items()], ) if not self._no_disk: diff --git a/coverage/sqlitedb.py b/coverage/sqlitedb.py index 4eed618e6..544594eb8 100644 --- a/coverage/sqlitedb.py +++ b/coverage/sqlitedb.py @@ -210,9 +210,8 @@ def _executemany(self, sql: str, data: list[Any]) -> sqlite3.Cursor: # https://github.com/nedbat/coveragepy/issues/1010 return self.con.executemany(sql, data) - def executemany_void(self, sql: str, data: Iterable[Any]) -> None: + def executemany_void(self, sql: str, data: list[Any]) -> None: """Same as :meth:`python:sqlite3.Connection.executemany` when you don't need the cursor.""" - data = list(data) if data: self._executemany(sql, data).close() From 27c2318fe7e1b33e4c683103d0fbcd97968b1b8f Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Sun, 17 Aug 2025 12:52:25 -0400 Subject: [PATCH 05/43] docs: thanks, Alex Gaynor #2032 --- CHANGES.rst | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index a16bc3fa7..64dd5ddd6 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -23,7 +23,10 @@ upgrading your version of coverage.py. Unreleased ---------- -Nothing yet. +- Slight speed improvements to ``coverage combine``, thanks to `Alex Gaynor + `_. + +.. _pull 2032: https://github.com/nedbat/coveragepy/pull/2032 .. start-releases From 575740bf47152159602ce3ea88eb244284ef7c99 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Sun, 17 Aug 2025 12:31:50 -0400 Subject: [PATCH 06/43] docs: slight tweaks to the issue template --- .github/ISSUE_TEMPLATE/bug_report.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 711103239..9f5cc77c5 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -11,11 +11,13 @@ assignees: '' A clear and concise description of the bug. **To Reproduce** -How can we reproduce the problem? Please *be specific*. Don't link to a failing CI job. Answer the questions below: +How can we reproduce the problem? Please *be specific*. Don't link to a failing CI job. Think about the time it will take us to recreate your situation: the easier you make it, the more likely your issue will be addressed. + +Answer the questions below: 1. What version of Python are you using? 1. What version of coverage.py shows the problem? The output of `coverage debug sys` is helpful. 1. What versions of what packages do you have installed? The output of `pip freeze` is helpful. -1. What code shows the problem? Give us a specific commit of a specific repo that we can check out. If you've already worked around the problem, please provide a commit before that fix. +1. What code shows the problem? Give us a *specific commit* of a *specific repo* that we can check out. If you've already worked around the problem, please provide a commit before that fix. 1. What commands should we run to reproduce the problem? *Be specific*. Include everything, even `git clone`, `pip install`, and so on. Explain like we're five! 
**Expected behavior** From d8f88c76568aaea8cc98c3c13590ada5d265c227 Mon Sep 17 00:00:00 2001 From: Alex Gaynor Date: Sun, 17 Aug 2025 18:38:31 -0400 Subject: [PATCH 07/43] refactor: use SQLite URIs for in-memory databases (#2034) This lets us ATTACH DATABASE between in-memory databases. --- coverage/sqldata.py | 7 ++++--- coverage/sqlitedb.py | 13 +++++++++---- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/coverage/sqldata.py b/coverage/sqldata.py index 6b3f0dbff..314d4c86b 100644 --- a/coverage/sqldata.py +++ b/coverage/sqldata.py @@ -18,6 +18,7 @@ import sys import textwrap import threading +import uuid import zlib from collections.abc import Collection, Mapping, Sequence from typing import Any, Callable, cast @@ -263,7 +264,7 @@ def _debug_dataio(self, msg: str, filename: str) -> None: def _choose_filename(self) -> None: """Set self._filename based on inited attributes.""" if self._no_disk: - self._filename = ":memory:" + self._filename = f"file:coverage-{uuid.uuid4()}?mode=memory&cache=shared" else: self._filename = self._basename suffix = filename_suffix(self._suffix) @@ -289,7 +290,7 @@ def close(self, force: bool = False) -> None: def _open_db(self) -> None: """Open an existing db file, and read its metadata.""" self._debug_dataio("Opening data file", self._filename) - self._dbs[threading.get_ident()] = SqliteDb(self._filename, self._debug) + self._dbs[threading.get_ident()] = SqliteDb(self._filename, self._debug, self._no_disk) self._read_db() def _read_db(self) -> None: @@ -402,7 +403,7 @@ def loads(self, data: bytes) -> None: f"Unrecognized serialization: {data[:40]!r} (head of {len(data)} bytes)", ) script = zlib.decompress(data[1:]).decode("utf-8") - self._dbs[threading.get_ident()] = db = SqliteDb(self._filename, self._debug) + self._dbs[threading.get_ident()] = db = SqliteDb(self._filename, self._debug, self._no_disk) with db: db.executescript(script) self._read_db() diff --git a/coverage/sqlitedb.py b/coverage/sqlitedb.py index 544594eb8..c80702290 100644 --- a/coverage/sqlitedb.py +++ b/coverage/sqlitedb.py @@ -28,9 +28,10 @@ class SqliteDb: etc(a, b) """ - def __init__(self, filename: str, debug: TDebugCtl) -> None: + def __init__(self, filename: str, debug: TDebugCtl, no_disk: bool = False) -> None: self.debug = debug self.filename = filename + self.no_disk = no_disk self.nest = 0 self.con: sqlite3.Connection | None = None @@ -49,7 +50,11 @@ def _connect(self) -> None: if self.debug.should("sql"): self.debug.write(f"Connecting to {self.filename!r}") try: - self.con = sqlite3.connect(self.filename, check_same_thread=False) + # Use uri=True when connecting to memory URIs + if self.filename.startswith("file:"): + self.con = sqlite3.connect(self.filename, check_same_thread=False, uri=True) + else: + self.con = sqlite3.connect(self.filename, check_same_thread=False) except sqlite3.Error as exc: raise DataError(f"Couldn't use data file {self.filename!r}: {exc}") from exc @@ -78,7 +83,7 @@ def _connect(self) -> None: def close(self, force: bool = False) -> None: """If needed, close the connection.""" if self.con is not None: - if force or self.filename != ":memory:": + if force or not self.no_disk: if self.debug.should("sql"): self.debug.write(f"Closing {self.con!r} on {self.filename!r}") self.con.close() @@ -120,7 +125,7 @@ def _execute(self, sql: str, parameters: Iterable[Any]) -> sqlite3.Cursor: return self.con.execute(sql, parameters) # type: ignore[arg-type] except sqlite3.Error as exc: msg = str(exc) - if self.filename != ":memory:": + if not self.no_disk: 
try: # `execute` is the first thing we do with the database, so try # hard to provide useful hints if something goes wrong now. From 9e50a02a960dfc25f4bd72177e99ab21ac76f792 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Mon, 18 Aug 2025 06:21:53 -0400 Subject: [PATCH 08/43] build: include coverage report in the action output --- .github/workflows/coverage.yml | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 5ccd7c737..ba81d4ad1 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -231,6 +231,14 @@ jobs: path: htmlcov include-hidden-files: true + - name: "Show text report" + run: | + set -xe + echo "## Coverage: $(python -m coverage report --format=total)%" >> $GITHUB_STEP_SUMMARY + echo '```' >> $GITHUB_STEP_SUMMARY + python -m coverage report --skip-covered >> $GITHUB_STEP_SUMMARY + echo '```' >> $GITHUB_STEP_SUMMARY + - name: "Get total" id: total run: | @@ -261,12 +269,6 @@ jobs: echo "url=https://htmlpreview.github.io/?https://github.com/nedbat/coverage-reports/blob/main/reports/$SLUG/htmlcov/index.html" >> $GITHUB_ENV echo "branch=${REF#refs/heads/}" >> $GITHUB_ENV - - name: "Summarize" - env: - TOTAL: ${{ needs.combine.outputs.total }} - run: | - echo "### TOTAL coverage: ${TOTAL}%" >> $GITHUB_STEP_SUMMARY - - name: "Checkout reports repo" if: ${{ github.ref == 'refs/heads/master' }} env: From b3dd60ffb3f82104a8eceeaa932f138637306e62 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Mon, 18 Aug 2025 19:14:15 -0400 Subject: [PATCH 09/43] build: don't make a JSON report, I wasn't using it The file was 2.8Gb and took two minutes to produce! --- igor.py | 1 - 1 file changed, 1 deletion(-) diff --git a/igor.py b/igor.py index 0cde71f59..7128d1a14 100644 --- a/igor.py +++ b/igor.py @@ -241,7 +241,6 @@ def do_combine_html(): os.getenv("COVERAGE_DYNCTX") or os.getenv("COVERAGE_CONTEXT"), ) total = cov.html_report(show_contexts=show_contexts) - cov.json_report(show_contexts=show_contexts, pretty_print=True) print(f"Total: {total:.3f}%") From 049a112bef6c0e76f8f06618e8e0f21340dc4c4c Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Mon, 18 Aug 2025 20:13:43 -0400 Subject: [PATCH 10/43] build: time some small steps when combining --- igor.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/igor.py b/igor.py index 7128d1a14..24b339aa8 100644 --- a/igor.py +++ b/igor.py @@ -8,6 +8,7 @@ """ +import contextlib import datetime import glob import inspect @@ -21,6 +22,7 @@ import sys import sysconfig import textwrap +import time import types import zipfile @@ -45,6 +47,16 @@ # by "python igor.py blah". +@contextlib.contextmanager +def time_message(msg: str): + """Print a message about how long something took.""" + start = time.monotonic() + try: + yield + finally: + print(f"Time for {msg}: {time.monotonic() - start:.2f}s") + + def do_show_env(): """Show the environment variables.""" print("Environment:") @@ -231,7 +243,8 @@ def do_combine_html(): os.environ["COVERAGE_HOME"] = os.getcwd() cov = coverage.Coverage(config_file="metacov.ini") cov.load() - cov.combine() + with time_message("combine"): + cov.combine() cov.save() # A new Coverage to turn on messages. Better would be to have tighter # control over message verbosity... 
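The new time_message helper is a plain contextlib.contextmanager. A quick standalone usage sketch (the messages here are hypothetical, not part of igor.py) shows why the try/finally matters: the elapsed-time line is printed even when the timed block raises.

```python
import contextlib
import time

@contextlib.contextmanager
def time_message(msg):
    """Print how long the wrapped block took, even if it fails."""
    start = time.monotonic()
    try:
        yield
    finally:
        print(f"Time for {msg}: {time.monotonic() - start:.2f}s")

with time_message("sleeping"):
    time.sleep(0.1)                    # prints something like: Time for sleeping: 0.10s

try:
    with time_message("failing step"):
        raise RuntimeError("boom")     # the timing line still prints before the error propagates
except RuntimeError:
    pass
```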
@@ -240,7 +253,8 @@ def do_combine_html(): show_contexts = bool( os.getenv("COVERAGE_DYNCTX") or os.getenv("COVERAGE_CONTEXT"), ) - total = cov.html_report(show_contexts=show_contexts) + with time_message("html"): + total = cov.html_report(show_contexts=show_contexts) print(f"Total: {total:.3f}%") From 81e01895d88e9c79008450e1dde4f858a900a903 Mon Sep 17 00:00:00 2001 From: Alex Gaynor Date: Tue, 19 Aug 2025 07:06:41 -0400 Subject: [PATCH 11/43] perf: move the core of the combine logic to be entirely in SQL (#2033) This shaves roughly 40% of the runtime off of pyca/cryptography's combine. --- coverage/sqldata.py | 249 +++++++++++++++++++++++--------------------- 1 file changed, 128 insertions(+), 121 deletions(-) diff --git a/coverage/sqldata.py b/coverage/sqldata.py index 314d4c86b..4fe7cd825 100644 --- a/coverage/sqldata.py +++ b/coverage/sqldata.py @@ -122,6 +122,22 @@ def _wrapped(self: CoverageData, *args: Any, **kwargs: Any) -> Any: return _wrapped +class NumbitsUnionAgg: + """SQLite aggregate function for computing union of numbits.""" + + def __init__(self) -> None: + self.result = b"" + + def step(self, value: bytes) -> None: + """Process one value in the aggregation.""" + if value: + self.result = numbits_union(self.result, value) + + def finalize(self) -> bytes: + """Return the final aggregated result.""" + return self.result + + class CoverageData: """Manages collected coverage data, including file storage. @@ -676,146 +692,137 @@ def update( # Force the database we're writing to to exist before we start nesting contexts. self._start_using() - - # Collector for all arcs, lines and tracers other_data.read() - with other_data._connect() as con: - # Get files data. - with con.execute("select path from file") as cur: - files = {path: map_path(path) for (path,) in cur} - - # Get contexts data. - with con.execute("select context from context") as cur: - contexts = cur.fetchall() - - # Get arc data. - with con.execute( - "select file.path, context.context, arc.fromno, arc.tono " + - "from arc " + - "inner join file on file.id = arc.file_id " + - "inner join context on context.id = arc.context_id", - ) as cur: - arcs = [ - (files[path], context, fromno, tono) - for (path, context, fromno, tono) in cur - ] - - # Get line data. - with con.execute( - "select file.path, context.context, line_bits.numbits " + - "from line_bits " + - "inner join file on file.id = line_bits.file_id " + - "inner join context on context.id = line_bits.context_id", - ) as cur: - lines: dict[tuple[str, str], bytes] = {} - for path, context, numbits in cur: - key = (files[path], context) - if key in lines: - numbits = numbits_union(lines[key], numbits) - lines[key] = numbits - - # Get tracer data. - with con.execute( - "select file.path, tracer " + - "from tracer " + - "inner join file on file.id = tracer.file_id", - ) as cur: - tracers = {files[path]: tracer for (path, tracer) in cur} + + # Ensure other_data has a properly initialized database + with other_data._connect(): + pass with self._connect() as con: assert con.con is not None con.con.isolation_level = "IMMEDIATE" - # Get all tracers in the DB. Files not in the tracers are assumed - # to have an empty string tracer. Since Sqlite does not support - # full outer joins, we have to make two queries to fill the - # dictionary. 
- with con.execute("select path from file") as cur: - this_tracers = {path: "" for path, in cur} - with con.execute( - "select file.path, tracer from tracer " + - "inner join file on file.id = tracer.file_id", - ) as cur: - this_tracers.update({ - map_path(path): tracer - for path, tracer in cur - }) - - # Create all file and context rows in the DB. - con.executemany_void( - "insert or ignore into file (path) values (?)", - [(file,) for file in files.values()], - ) - with con.execute("select id, path from file") as cur: - file_ids = {path: id for id, path in cur} - self._file_map.update(file_ids) - con.executemany_void( - "insert or ignore into context (context) values (?)", - contexts, + # Register functions for SQLite + con.con.create_function("numbits_union", 2, numbits_union) + con.con.create_function("map_path", 1, map_path) + con.con.create_aggregate( + "numbits_union_agg", 1, NumbitsUnionAgg # type: ignore[arg-type] ) - with con.execute("select id, context from context") as cur: - context_ids = {context: id for id, context in cur} - - # Prepare tracers and fail, if a conflict is found. - # tracer_paths is used to ensure consistency over the tracer data - # and tracer_map tracks the tracers to be inserted. - tracer_map = {} - for path in files.values(): - this_tracer = this_tracers.get(path) - other_tracer = tracers.get(path, "") - # If there is no tracer, there is always the None tracer. - if this_tracer is not None and this_tracer != other_tracer: + + # Attach the other database + con.execute_void("ATTACH DATABASE ? AS other_db", (other_data.data_filename(),)) + + # Create temporary table with mapped file paths to avoid repeated map_path() calls + con.execute_void(""" + CREATE TEMP TABLE other_file_mapped AS + SELECT + other_file.id as other_file_id, + map_path(other_file.path) as mapped_path + FROM other_db.file AS other_file + """) + + # Check for tracer conflicts before proceeding + with con.execute(""" + SELECT other_file_mapped.mapped_path, + COALESCE(main.tracer.tracer, ''), + COALESCE(other_db.tracer.tracer, '') + FROM main.file + LEFT JOIN main.tracer ON main.file.id = main.tracer.file_id + INNER JOIN other_file_mapped ON main.file.path = other_file_mapped.mapped_path + LEFT JOIN other_db.tracer ON other_file_mapped.other_file_id = other_db.tracer.file_id + WHERE COALESCE(main.tracer.tracer, '') != COALESCE(other_db.tracer.tracer, '') + """) as cur: + conflicts = list(cur) + if conflicts: + path, this_tracer, other_tracer = conflicts[0] raise DataError( "Conflicting file tracer name for '{}': {!r} vs {!r}".format( path, this_tracer, other_tracer, ), ) - tracer_map[path] = other_tracer - # Prepare arc and line rows to be inserted by converting the file - # and context strings with integer ids. Then use the efficient - # `executemany()` to insert all rows at once. + # Insert missing files from other_db (with map_path applied) + con.execute_void(""" + INSERT OR IGNORE INTO main.file (path) + SELECT DISTINCT mapped_path FROM other_file_mapped + """) - if arcs: - self._choose_lines_or_arcs(arcs=True) + # Insert missing contexts from other_db + con.execute_void(""" + INSERT OR IGNORE INTO main.context (context) + SELECT context FROM other_db.context + """) - arc_rows = [ - (file_ids[file], context_ids[context], fromno, tono) - for file, context, fromno, tono in arcs - ] + # Update file_map with any new files + with con.execute("select id, path from file") as cur: + self._file_map.update({path: id for id, path in cur}) - # Write the combined data. 
- con.executemany_void( - "insert or ignore into arc " + - "(file_id, context_id, fromno, tono) values (?, ?, ?, ?)", - arc_rows, - ) + with con.execute(""" + SELECT + EXISTS(SELECT 1 FROM other_db.arc), + EXISTS(SELECT 1 FROM other_db.line_bits) + """) as cur: + has_arcs, has_lines = cur.fetchone() - if lines: + # Handle arcs if present in other_db + if has_arcs: + self._choose_lines_or_arcs(arcs=True) + con.execute_void(""" + INSERT OR IGNORE INTO main.arc (file_id, context_id, fromno, tono) + SELECT + main_file.id, + main_context.id, + other_arc.fromno, + other_arc.tono + FROM other_db.arc AS other_arc + INNER JOIN other_file_mapped ON other_arc.file_id = other_file_mapped.other_file_id + INNER JOIN other_db.context AS other_context ON other_arc.context_id = other_context.id + INNER JOIN main.file AS main_file ON other_file_mapped.mapped_path = main_file.path + INNER JOIN main.context AS main_context ON other_context.context = main_context.context + """) + + # Handle line_bits if present in other_db + if has_lines: self._choose_lines_or_arcs(lines=True) - for (file, context), numbits in lines.items(): - with con.execute( - "select numbits from line_bits where file_id = ? and context_id = ?", - (file_ids[file], context_ids[context]), - ) as cur: - existing = list(cur) - if existing: - lines[(file, context)] = numbits_union(numbits, existing[0][0]) - - con.executemany_void( - "insert or replace into line_bits " + - "(file_id, context_id, numbits) values (?, ?, ?)", - [ - (file_ids[file], context_ids[context], numbits) - for (file, context), numbits in lines.items() - ], - ) - - con.executemany_void( - "insert or ignore into tracer (file_id, tracer) values (?, ?)", - [(file_ids[filename], tracer) for filename, tracer in tracer_map.items()], - ) + # Handle line_bits by aggregating other_db data by mapped target, + # then inserting/updating + con.execute_void(""" + INSERT OR REPLACE INTO main.line_bits (file_id, context_id, numbits) + SELECT + main_file.id, + main_context.id, + numbits_union( + COALESCE(( + SELECT numbits FROM main.line_bits + WHERE file_id = main_file.id AND context_id = main_context.id + ), X''), + aggregated.combined_numbits + ) + FROM ( + SELECT + other_file_mapped.mapped_path, + other_context.context, + numbits_union_agg(other_line_bits.numbits) as combined_numbits + FROM other_db.line_bits AS other_line_bits + INNER JOIN other_file_mapped ON other_line_bits.file_id = other_file_mapped.other_file_id + INNER JOIN other_db.context AS other_context ON other_line_bits.context_id = other_context.id + GROUP BY other_file_mapped.mapped_path, other_context.context + ) AS aggregated + INNER JOIN main.file AS main_file ON aggregated.mapped_path = main_file.path + INNER JOIN main.context AS main_context ON aggregated.context = main_context.context + """) + + # Insert tracers from other_db (avoiding conflicts we already checked) + con.execute_void(""" + INSERT OR IGNORE INTO main.tracer (file_id, tracer) + SELECT + main_file.id, + other_tracer.tracer + FROM other_db.tracer AS other_tracer + INNER JOIN other_file_mapped ON other_tracer.file_id = other_file_mapped.other_file_id + INNER JOIN main.file AS main_file ON other_file_mapped.mapped_path = main_file.path + """) if not self._no_disk: # Update all internal cache data. 
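The change above leans on two sqlite3 facilities that are easy to miss in the diff: registering a Python class as a SQL aggregate with create_aggregate(), and merging rows from a second database after ATTACH DATABASE, using the shared-cache in-memory URIs introduced in the earlier refactor. Here is a minimal standalone sketch under made-up names (BitOrAgg, bit_or_agg, and a toy bits table stand in for numbits_union_agg and coverage.py's real schema); it is an illustration of the mechanism, not the project's code.

```python
import sqlite3

class BitOrAgg:
    """Aggregate that ORs one-byte BLOBs together; a toy stand-in for numbits_union_agg."""
    def __init__(self):
        self.acc = 0
    def step(self, value):
        self.acc |= value[0]
    def finalize(self):
        return bytes([self.acc])

main = sqlite3.connect("file:main_db?mode=memory&cache=shared", uri=True)
other_uri = "file:other_db?mode=memory&cache=shared"
other = sqlite3.connect(other_uri, uri=True)    # keeps the shared in-memory db alive

main.execute("CREATE TABLE bits (key TEXT, numbits BLOB)")
main.execute("INSERT INTO bits VALUES ('a', X'01')")
main.commit()                                   # ATTACH isn't allowed inside a transaction

other.execute("CREATE TABLE bits (key TEXT, numbits BLOB)")
other.executemany("INSERT INTO bits VALUES (?, ?)", [("a", b"\x02"), ("b", b"\x04")])
other.commit()

main.create_aggregate("bit_or_agg", 1, BitOrAgg)
main.execute("ATTACH DATABASE ? AS other_db", (other_uri,))
rows = main.execute("""
    SELECT key, bit_or_agg(numbits) FROM (
        SELECT key, numbits FROM bits
        UNION ALL
        SELECT key, numbits FROM other_db.bits
    ) GROUP BY key ORDER BY key
""").fetchall()
print(rows)   # [('a', b'\x03'), ('b', b'\x04')]
```

Because both connections are opened with uri=True on shared-cache memory URIs, the ATTACH sees the same in-memory data, which is what the no-disk combine path needs.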
From 697cdebea7f53860965d1d77b8d985f144067d1a Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Tue, 19 Aug 2025 07:10:44 -0400 Subject: [PATCH 12/43] docs: huge thanks to Alex Gaynor --- CHANGES.rst | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 64dd5ddd6..43cd70779 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -23,10 +23,13 @@ upgrading your version of coverage.py. Unreleased ---------- -- Slight speed improvements to ``coverage combine``, thanks to `Alex Gaynor - `_. +- Big speed improvements for ``coverage combine``: it now takes about half the + time it used to! Huge thanks to Alex Gaynor for pull requests `2032 + `_, `2033 `_, and `2034 `_. .. _pull 2032: https://github.com/nedbat/coveragepy/pull/2032 +.. _pull 2033: https://github.com/nedbat/coveragepy/pull/2033 +.. _pull 2034: https://github.com/nedbat/coveragepy/pull/2034 .. start-releases From 9096b00b3e0e900c9791c89333a9a8c36d3b6c4a Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Tue, 19 Aug 2025 07:20:44 -0400 Subject: [PATCH 13/43] style: use upper-case for SQL throughout --- coverage/sqldata.py | 82 ++++++++++++++++++++++++--------------------- 1 file changed, 43 insertions(+), 39 deletions(-) diff --git a/coverage/sqldata.py b/coverage/sqldata.py index 4fe7cd825..92104a71e 100644 --- a/coverage/sqldata.py +++ b/coverage/sqldata.py @@ -346,7 +346,7 @@ def _init_db(self, db: SqliteDb) -> None: """Write the initial contents of the database.""" self._debug_dataio("Initing data file", self._filename) db.executescript(SCHEMA) - db.execute_void("insert into coverage_schema (version) values (?)", (SCHEMA_VERSION,)) + db.execute_void("INSERT INTO coverage_schema (version) VALUES (?)", (SCHEMA_VERSION,)) # When writing metadata, avoid information that will needlessly change # the hash of the data file, unless we're debugging processes. 
@@ -358,7 +358,7 @@ def _init_db(self, db: SqliteDb) -> None: ("sys_argv", str(getattr(sys, "argv", None))), ("when", datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")), ]) - db.executemany_void("insert or ignore into meta (key, value) values (?, ?)", meta_data) + db.executemany_void("INSERT OR IGNORE INTO meta (key, value) VALUES (?, ?)", meta_data) def _connect(self) -> SqliteDb: """Get the SqliteDb object to use.""" @@ -371,7 +371,7 @@ def __bool__(self) -> bool: return False try: with self._connect() as con: - with con.execute("select * from file limit 1") as cur: + with con.execute("SELECT * FROM file LIMIT 1") as cur: return bool(list(cur)) except CoverageException: return False @@ -435,7 +435,7 @@ def _file_id(self, filename: str, add: bool = False) -> int | None: if add: with self._connect() as con: self._file_map[filename] = con.execute_for_rowid( - "insert or replace into file (path) values (?)", + "INSERT OR REPLACE INTO file (path) VALUES (?)", (filename,), ) return self._file_map.get(filename) @@ -445,7 +445,7 @@ def _context_id(self, context: str) -> int | None: assert context is not None self._start_using() with self._connect() as con: - row = con.execute_one("select id from context where context = ?", (context,)) + row = con.execute_one("SELECT id FROM context WHERE context = ?", (context,)) if row is not None: return cast(int, row[0]) else: @@ -475,7 +475,7 @@ def _set_context_id(self) -> None: else: with self._connect() as con: self._current_context_id = con.execute_for_rowid( - "insert into context (context) values (?)", + "INSERT INTO context (context) VALUES (?)", (context,), ) @@ -520,15 +520,17 @@ def add_lines(self, line_data: Mapping[str, Collection[TLineNo]]) -> None: for filename, linenos in line_data.items(): line_bits = nums_to_numbits(linenos) file_id = self._file_id(filename, add=True) - query = "select numbits from line_bits where file_id = ? and context_id = ?" + query = "SELECT numbits FROM line_bits WHERE file_id = ? AND context_id = ?" with con.execute(query, (file_id, self._current_context_id)) as cur: existing = list(cur) if existing: line_bits = numbits_union(line_bits, existing[0][0]) con.execute_void( - "insert or replace into line_bits " + - " (file_id, context_id, numbits) values (?, ?, ?)", + """ + INSERT OR REPLACE INTO line_bits + (file_id, context_id, numbits) VALUES (?, ?, ?) + """, (file_id, self._current_context_id, line_bits), ) @@ -561,8 +563,10 @@ def add_arcs(self, arc_data: Mapping[str, Collection[TArc]]) -> None: file_id = self._file_id(filename, add=True) data = [(file_id, self._current_context_id, fromno, tono) for fromno, tono in arcs] con.executemany_void( - "insert or ignore into arc " + - "(file_id, context_id, fromno, tono) values (?, ?, ?, ?)", + """ + INSERT OR IGNORE INTO arc + (file_id, context_id, fromno, tono) VALUES (?, ?, ?, ?) 
+ """, data, ) @@ -583,7 +587,7 @@ def _choose_lines_or_arcs(self, lines: bool = False, arcs: bool = False) -> None self._has_arcs = arcs with self._connect() as con: con.execute_void( - "insert or ignore into meta (key, value) values (?, ?)", + "INSERT OR IGNORE INTO meta (key, value) VALUES (?, ?)", ("has_arcs", str(int(arcs))), ) @@ -612,7 +616,7 @@ def add_file_tracers(self, file_tracers: Mapping[str, str]) -> None: ) elif plugin_name: con.execute_void( - "insert into tracer (file_id, tracer) values (?, ?)", + "INSERT INTO TRACER (file_id, tracer) VALUES (?, ?)", (file_id, plugin_name), ) @@ -655,9 +659,9 @@ def purge_files(self, filenames: Collection[str]) -> None: with self._connect() as con: if self._has_lines: - sql = "delete from line_bits where file_id=?" + sql = "DELETE FROM line_bits WHERE file_id=?" elif self._has_arcs: - sql = "delete from arc where file_id=?" + sql = "DELETE FROM arc WHERE file_id=?" else: raise DataError("Can't purge files in an empty CoverageData") @@ -754,7 +758,7 @@ def update( """) # Update file_map with any new files - with con.execute("select id, path from file") as cur: + with con.execute("SELECT id, path FROM file") as cur: self._file_map.update({path: id for id, path in cur}) with con.execute(""" @@ -891,7 +895,7 @@ def measured_contexts(self) -> set[str]: """ self._start_using() with self._connect() as con: - with con.execute("select distinct(context) from context") as cur: + with con.execute("SELECT DISTINCT(context) FROM context") as cur: contexts = {row[0] for row in cur} return contexts @@ -908,7 +912,7 @@ def file_tracer(self, filename: str) -> str | None: file_id = self._file_id(filename) if file_id is None: return None - row = con.execute_one("select tracer from tracer where file_id = ?", (file_id,)) + row = con.execute_one("SELECT tracer FROM tracer WHERE file_id = ?", (file_id,)) if row is not None: return row[0] or "" return "" # File was measured, but no tracer associated. @@ -926,7 +930,7 @@ def set_query_context(self, context: str) -> None: """ self._start_using() with self._connect() as con: - with con.execute("select id from context where context = ?", (context,)) as cur: + with con.execute("SELECT id FROM context WHERE context = ?", (context,)) as cur: self._query_context_ids = [row[0] for row in cur.fetchall()] def set_query_contexts(self, contexts: Sequence[str] | None) -> None: @@ -944,8 +948,8 @@ def set_query_contexts(self, contexts: Sequence[str] | None) -> None: self._start_using() if contexts: with self._connect() as con: - context_clause = " or ".join(["context regexp ?"] * len(contexts)) - with con.execute("select id from context where " + context_clause, contexts) as cur: + context_clause = " or ".join(["context REGEXP ?"] * len(contexts)) + with con.execute("SELECT id FROM context WHERE " + context_clause, contexts) as cur: self._query_context_ids = [row[0] for row in cur.fetchall()] else: self._query_context_ids = None @@ -972,11 +976,11 @@ def lines(self, filename: str) -> list[TLineNo] | None: if file_id is None: return None else: - query = "select numbits from line_bits where file_id = ?" + query = "SELECT numbits FROM line_bits WHERE file_id = ?" data = [file_id] if self._query_context_ids is not None: ids_array = ", ".join("?" 
* len(self._query_context_ids)) - query += " and context_id in (" + ids_array + ")" + query += " AND context_id IN (" + ids_array + ")" data += self._query_context_ids with con.execute(query, data) as cur: bitmaps = list(cur) @@ -1008,11 +1012,11 @@ def arcs(self, filename: str) -> list[TArc] | None: if file_id is None: return None else: - query = "select distinct fromno, tono from arc where file_id = ?" + query = "SELECT DISTINCT fromno, tono FROM arc WHERE file_id = ?" data = [file_id] if self._query_context_ids is not None: ids_array = ", ".join("?" * len(self._query_context_ids)) - query += " and context_id in (" + ids_array + ")" + query += " AND context_id IN (" + ids_array + ")" data += self._query_context_ids with con.execute(query, data) as cur: return list(cur) @@ -1034,15 +1038,15 @@ def contexts_by_lineno(self, filename: str) -> dict[TLineNo, list[str]]: lineno_contexts_map = collections.defaultdict(set) if self.has_arcs(): - query = ( - "select arc.fromno, arc.tono, context.context " + - "from arc, context " + - "where arc.file_id = ? and arc.context_id = context.id" - ) + query = """ + SELECT arc.fromno, arc.tono, context.context + FROM arc, context + WHERE arc.file_id = ? AND arc.context_id = context.id + """ data = [file_id] if self._query_context_ids is not None: ids_array = ", ".join("?" * len(self._query_context_ids)) - query += " and arc.context_id in (" + ids_array + ")" + query += " AND arc.context_id IN (" + ids_array + ")" data += self._query_context_ids with con.execute(query, data) as cur: for fromno, tono, context in cur: @@ -1051,15 +1055,15 @@ def contexts_by_lineno(self, filename: str) -> dict[TLineNo, list[str]]: if tono > 0: lineno_contexts_map[tono].add(context) else: - query = ( - "select l.numbits, c.context from line_bits l, context c " + - "where l.context_id = c.id " + - "and file_id = ?" - ) + query = """ + SELECT l.numbits, c.context FROM line_bits l, context c + WHERE l.context_id = c.id + AND file_id = ? + """ data = [file_id] if self._query_context_ids is not None: ids_array = ", ".join("?" 
* len(self._query_context_ids)) - query += " and l.context_id in (" + ids_array + ")" + query += " AND l.context_id IN (" + ids_array + ")" data += self._query_context_ids with con.execute(query, data) as cur: for numbits, context in cur: @@ -1076,9 +1080,9 @@ def sys_info(cls) -> list[tuple[str, Any]]: """ with SqliteDb(":memory:", debug=NoDebugging()) as db: - with db.execute("pragma temp_store") as cur: + with db.execute("PRAGMA temp_store") as cur: temp_store = [row[0] for row in cur] - with db.execute("pragma compile_options") as cur: + with db.execute("PRAGMA compile_options") as cur: copts = [row[0] for row in cur] copts = textwrap.wrap(", ".join(copts), width=75) From e684dff74d8f46c17ea4a1a68f4bed57287b0aff Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Tue, 19 Aug 2025 07:50:35 -0400 Subject: [PATCH 14/43] refactor: two branches that aren't used and aren't needed --- coverage/sqldata.py | 3 +-- coverage/sqlitedb.py | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/coverage/sqldata.py b/coverage/sqldata.py index 92104a71e..5e3d67906 100644 --- a/coverage/sqldata.py +++ b/coverage/sqldata.py @@ -130,8 +130,7 @@ def __init__(self) -> None: def step(self, value: bytes) -> None: """Process one value in the aggregation.""" - if value: - self.result = numbits_union(self.result, value) + self.result = numbits_union(self.result, value) def finalize(self) -> bytes: """Return the final aggregated result.""" diff --git a/coverage/sqlitedb.py b/coverage/sqlitedb.py index c80702290..21826c801 100644 --- a/coverage/sqlitedb.py +++ b/coverage/sqlitedb.py @@ -217,8 +217,7 @@ def _executemany(self, sql: str, data: list[Any]) -> sqlite3.Cursor: def executemany_void(self, sql: str, data: list[Any]) -> None: """Same as :meth:`python:sqlite3.Connection.executemany` when you don't need the cursor.""" - if data: - self._executemany(sql, data).close() + self._executemany(sql, data).close() def executescript(self, script: str) -> None: """Same as :meth:`python:sqlite3.Connection.executescript`.""" From 5ddfa076b586b7300d495c7056c3155adaf5fffb Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Wed, 20 Aug 2025 07:39:35 -0400 Subject: [PATCH 15/43] style: disable ruff where needed We don't use ruff yet, but we're getting there. --- coverage/config.py | 20 +++++++------------- coverage/control.py | 2 +- coverage/core.py | 6 +----- coverage/debug.py | 2 ++ coverage/env.py | 20 ++++++++++---------- coverage/files.py | 2 ++ coverage/html.py | 4 ++-- coverage/inorout.py | 6 +----- coverage/misc.py | 6 +----- coverage/pytracer.py | 4 ++-- coverage/report.py | 4 ++-- coverage/report_core.py | 7 +------ coverage/sysmon.py | 8 +------- coverage/templite.py | 9 ++------- coverage/types.py | 9 +-------- tests/coveragetest.py | 6 ++---- tests/helpers.py | 4 +--- tests/test_data.py | 4 +--- 18 files changed, 40 insertions(+), 83 deletions(-) diff --git a/coverage/config.py b/coverage/config.py index 32a0b6039..e07fdf752 100644 --- a/coverage/config.py +++ b/coverage/config.py @@ -14,13 +14,7 @@ import os.path import re from collections.abc import Iterable -from typing import ( - Any, - Callable, - Final, - Mapping, - Union, -) +from typing import Any, Callable, Final, Mapping, Union from coverage.exceptions import ConfigError from coverage.misc import human_sorted_items, isolate_module, substitute_variables @@ -386,7 +380,7 @@ def copy(self) -> CoverageConfig: # where is the section:name to read from the configuration file. 
# type_ is the optional type to apply, by using .getTYPE to read the # configuration value from the file. - + # # [run] ("branch", "run:branch", "boolean"), ("command_line", "run:command_line"), @@ -411,7 +405,7 @@ def copy(self) -> CoverageConfig: ("source_dirs", "run:source_dirs", "list"), ("timid", "run:timid", "boolean"), ("_crash", "run:_crash"), - + # # [report] ("exclude_list", "report:exclude_lines", "regexlist"), ("exclude_also", "report:exclude_also", "regexlist"), @@ -430,7 +424,7 @@ def copy(self) -> CoverageConfig: ("skip_covered", "report:skip_covered", "boolean"), ("skip_empty", "report:skip_empty", "boolean"), ("sort", "report:sort"), - + # # [html] ("extra_css", "html:extra_css"), ("html_dir", "html:directory", "file"), @@ -438,16 +432,16 @@ def copy(self) -> CoverageConfig: ("html_skip_empty", "html:skip_empty", "boolean"), ("html_title", "html:title"), ("show_contexts", "html:show_contexts", "boolean"), - + # # [xml] ("xml_output", "xml:output", "file"), ("xml_package_depth", "xml:package_depth", "int"), - + # # [json] ("json_output", "json:output", "file"), ("json_pretty_print", "json:pretty_print", "boolean"), ("json_show_contexts", "json:show_contexts", "boolean"), - + # # [lcov] ("lcov_output", "lcov:output", "file"), ("lcov_line_checksums", "lcov:line_checksums", "boolean") diff --git a/coverage/control.py b/coverage/control.py index 95171e400..22ecd1652 100644 --- a/coverage/control.py +++ b/coverage/control.py @@ -642,7 +642,7 @@ def _init_for_start(self) -> None: # Register our clean-up handlers. atexit.register(self._atexit) if self.config.sigterm: - is_main = (threading.current_thread() == threading.main_thread()) + is_main = (threading.current_thread() == threading.main_thread()) # fmt: skip if is_main and not env.WINDOWS: # The Python docs seem to imply that SIGTERM works uniformly even # on Windows, but that's not my experience, and this agrees: diff --git a/coverage/core.py b/coverage/core.py index 429598f4b..c5b196788 100644 --- a/coverage/core.py +++ b/coverage/core.py @@ -16,11 +16,7 @@ from coverage.misc import isolate_module from coverage.pytracer import PyTracer from coverage.sysmon import SysMonitor -from coverage.types import ( - TFileDisposition, - Tracer, - TWarnFn, -) +from coverage.types import TFileDisposition, Tracer, TWarnFn os = isolate_module(os) diff --git a/coverage/debug.py b/coverage/debug.py index 039a7a372..218b913e0 100644 --- a/coverage/debug.py +++ b/coverage/debug.py @@ -259,11 +259,13 @@ def short_stack( """ # Regexes in initial frames that we don't care about. + # fmt: off BORING_PRELUDE = [ "", # pytest-xdist has string execution. r"\bigor.py$", # Our test runner. r"\bsite-packages\b", # pytest etc getting to our tests. ] + # fmt: on stack: Iterable[inspect.FrameInfo] = inspect.stack()[:skip:-1] if not full: diff --git a/coverage/env.py b/coverage/env.py index 1a45536d5..a2b6c4547 100644 --- a/coverage/env.py +++ b/coverage/env.py @@ -24,8 +24,8 @@ MACOS = sys.platform == "darwin" # Python implementations. -CPYTHON = (platform.python_implementation() == "CPython") -PYPY = (platform.python_implementation() == "PyPy") +CPYTHON = (platform.python_implementation() == "CPython") # fmt: skip +PYPY = (platform.python_implementation() == "PyPy") # fmt: skip # Python versions. We amend version_info with one more value, a zero if an # official version, or 1 if built from source beyond an official version. @@ -54,7 +54,7 @@ class PYBEHAVIOR: # Does Python conform to PEP626, Precise line numbers for debugging and other tools. 
# https://www.python.org/dev/peps/pep-0626 - pep626 = (PYVERSION > (3, 10, 0, "alpha", 4)) + pep626 = (PYVERSION > (3, 10, 0, "alpha", 4)) # fmt: skip # Is "if __debug__" optimized away? optimize_if_debug = not pep626 @@ -107,7 +107,7 @@ class PYBEHAVIOR: # wwith.py(3): with open("/tmp/test2", "w") as f3: # wwith.py(1): with open("/tmp/test", "w") as f1: # - exit_through_with = (PYVERSION >= (3, 10, 0, "beta")) + exit_through_with = (PYVERSION >= (3, 10, 0, "beta")) # fmt: skip # When leaving a with-block, do we visit the with-line exactly, # or the context managers in inner-out order? @@ -144,16 +144,16 @@ class PYBEHAVIOR: # mwith.py(3): open("/tmp/two", "w") as f3, # mwith.py(2): open("/tmp/one", "w") as f2, - exit_with_through_ctxmgr = (PYVERSION >= (3, 12, 6)) + exit_with_through_ctxmgr = (PYVERSION >= (3, 12, 6)) # fmt: skip # Match-case construct. - match_case = (PYVERSION >= (3, 10)) + match_case = (PYVERSION >= (3, 10)) # fmt: skip # Some words are keywords in some places, identifiers in other places. - soft_keywords = (PYVERSION >= (3, 10)) + soft_keywords = (PYVERSION >= (3, 10)) # fmt: skip # f-strings are parsed as code, pep 701 - fstring_syntax = (PYVERSION >= (3, 12)) + fstring_syntax = (PYVERSION >= (3, 12)) # fmt: skip # PEP669 Low Impact Monitoring: https://peps.python.org/pep-0669/ pep669: Final[bool] = bool(getattr(sys, "monitoring", None)) @@ -162,10 +162,10 @@ class PYBEHAVIOR: # It used to point at the YIELD, in 3.13 it points at the RESUME, # then it went back to the YIELD. # https://github.com/python/cpython/issues/113728 - lasti_is_yield = (PYVERSION[:2] != (3, 13)) + lasti_is_yield = (PYVERSION[:2] != (3, 13)) # fmt: skip # PEP649 and PEP749: Deferred annotations - deferred_annotations = (PYVERSION >= (3, 14)) + deferred_annotations = (PYVERSION >= (3, 14)) # fmt: skip # Does sys.monitoring support BRANCH_RIGHT and BRANCH_LEFT? The names # were added in early 3.14 alphas, but didn't work entirely correctly until diff --git a/coverage/files.py b/coverage/files.py index 0784e14bd..a0090c223 100644 --- a/coverage/files.py +++ b/coverage/files.py @@ -307,6 +307,7 @@ def sep(s: str) -> str: # Tokenizer for _glob_to_regex. # None as a sub means disallowed. +# fmt: off G2RX_TOKENS = [(re.compile(rx), sub) for rx, sub in [ (r"\*\*\*+", None), # Can't have *** (r"[^/]+\*\*+", None), # Can't have x** @@ -323,6 +324,7 @@ def sep(s: str) -> str: (r"[\[\]]", None), # Can't have single square brackets (r".", r"\\\g<0>"), # Anything else is escaped to be safe ]] +# fmt: on def _glob_to_regex(pattern: str) -> str: """Convert a file-path glob pattern into a regex.""" diff --git a/coverage/html.py b/coverage/html.py index 836123a7a..e35cabbea 100644 --- a/coverage/html.py +++ b/coverage/html.py @@ -446,8 +446,8 @@ def should_report(self, analysis: Analysis, index_page: IndexPage) -> bool: if self.skip_covered: # Don't report on 100% files. 
- no_missing_lines = (nums.n_missing == 0) - no_missing_branches = (nums.n_partial_branches == 0) + no_missing_lines = (nums.n_missing == 0) # fmt: skip + no_missing_branches = (nums.n_partial_branches == 0) # fmt: skip if no_missing_lines and no_missing_branches: index_page.skipped_covered_count += 1 return False diff --git a/coverage/inorout.py b/coverage/inorout.py index 632798fe7..a19a0ef24 100644 --- a/coverage/inorout.py +++ b/coverage/inorout.py @@ -17,11 +17,7 @@ import traceback from collections.abc import Iterable from types import FrameType, ModuleType -from typing import ( - TYPE_CHECKING, - Any, - cast, -) +from typing import TYPE_CHECKING, Any, cast from coverage import env from coverage.disposition import FileDisposition, disposition_init diff --git a/coverage/misc.py b/coverage/misc.py index 9cb459c12..29be16e19 100644 --- a/coverage/misc.py +++ b/coverage/misc.py @@ -20,11 +20,7 @@ import types from collections.abc import Iterable, Iterator, Mapping, Sequence from types import ModuleType -from typing import ( - Any, - NoReturn, - TypeVar, -) +from typing import Any, NoReturn, TypeVar # In 6.0, the exceptions moved from misc.py to exceptions.py. But a number of # other packages were importing the exceptions from misc, so import them here. diff --git a/coverage/pytracer.py b/coverage/pytracer.py index 9a24e4faf..c08a9b2d8 100644 --- a/coverage/pytracer.py +++ b/coverage/pytracer.py @@ -246,9 +246,9 @@ def _trace( # The current opcode is guaranteed to be RESUME. The argument # determines what kind of resume it is. oparg = frame.f_code.co_code[frame.f_lasti + 1] - real_call = (oparg == 0) + real_call = (oparg == 0) # fmt: skip else: - real_call = (getattr(frame, "f_lasti", -1) < 0) + real_call = (getattr(frame, "f_lasti", -1) < 0) # fmt: skip if real_call: self.last_line = -frame.f_code.co_firstlineno else: diff --git a/coverage/report.py b/coverage/report.py index 7032cf043..f49cea1db 100644 --- a/coverage/report.py +++ b/coverage/report.py @@ -269,8 +269,8 @@ def report_one_file(self, fr: FileReporter, analysis: Analysis) -> None: nums = analysis.numbers self.total += nums - no_missing_lines = (nums.n_missing == 0) - no_missing_branches = (nums.n_partial_branches == 0) + no_missing_lines = (nums.n_missing == 0) # fmt: skip + no_missing_branches = (nums.n_partial_branches == 0) # fmt: skip if self.config.skip_covered and no_missing_lines and no_missing_branches: # Don't report on 100% files. 
self.skipped_count += 1 diff --git a/coverage/report_core.py b/coverage/report_core.py index e68138fd9..e19117f98 100644 --- a/coverage/report_core.py +++ b/coverage/report_core.py @@ -7,12 +7,7 @@ import sys from collections.abc import Iterable, Iterator -from typing import ( - IO, - TYPE_CHECKING, - Callable, - Protocol, -) +from typing import IO, TYPE_CHECKING, Callable, Protocol from coverage.exceptions import NoDataError, NotPython from coverage.files import GlobMatcher, prep_patterns diff --git a/coverage/sysmon.py b/coverage/sysmon.py index fa0251441..24be6cbb8 100644 --- a/coverage/sysmon.py +++ b/coverage/sysmon.py @@ -14,13 +14,7 @@ import traceback from dataclasses import dataclass from types import CodeType -from typing import ( - Any, - Callable, - NewType, - Optional, - cast, -) +from typing import Any, Callable, NewType, Optional, cast from coverage import env from coverage.bytecode import TBranchTrails, always_jumps, branch_trails diff --git a/coverage/templite.py b/coverage/templite.py index 5680474bb..73ef54f5d 100644 --- a/coverage/templite.py +++ b/coverage/templite.py @@ -13,12 +13,7 @@ from __future__ import annotations import re -from typing import ( - Any, - Callable, - NoReturn, - cast, -) +from typing import Any, Callable, NoReturn, cast class TempliteSyntaxError(ValueError): @@ -165,7 +160,7 @@ def flush_output() -> None: for token in tokens: if token.startswith("{"): start, end = 2, -2 - squash = (token[-3] == "-") + squash = (token[-3] == "-") # fmt: skip if squash: end = -3 diff --git a/coverage/types.py b/coverage/types.py index 31951d0c9..3c32a5e38 100644 --- a/coverage/types.py +++ b/coverage/types.py @@ -11,14 +11,7 @@ import pathlib from collections.abc import Iterable, Mapping from types import FrameType, ModuleType -from typing import ( - TYPE_CHECKING, - Any, - Callable, - Optional, - Protocol, - Union, -) +from typing import TYPE_CHECKING, Any, Callable, Optional, Protocol, Union if TYPE_CHECKING: from coverage.plugin import FileTracer diff --git a/tests/coveragetest.py b/tests/coveragetest.py index 5ea4b460f..b040b89c1 100644 --- a/tests/coveragetest.py +++ b/tests/coveragetest.py @@ -17,11 +17,9 @@ import shlex import sys -from types import ModuleType -from typing import ( - Any, -) from collections.abc import Collection, Iterable, Iterator, Mapping, Sequence +from types import ModuleType +from typing import Any import coverage from coverage import Coverage diff --git a/tests/helpers.py b/tests/helpers.py index 0e66ae500..31831c4e9 100644 --- a/tests/helpers.py +++ b/tests/helpers.py @@ -20,9 +20,7 @@ import warnings from pathlib import Path -from typing import ( - Any, Callable, NoReturn, TypeVar, cast, -) +from typing import Any, Callable, NoReturn, TypeVar, cast from collections.abc import Iterable, Iterator from coverage import env diff --git a/tests/test_data.py b/tests/test_data.py index a7e63bc27..de89e6f4b 100644 --- a/tests/test_data.py +++ b/tests/test_data.py @@ -12,10 +12,8 @@ import sqlite3 import threading -from typing import ( - Any, Callable, TypeVar, Union, -) from collections.abc import Collection, Iterable, Mapping +from typing import Any, Callable, TypeVar, Union from unittest import mock import pytest From f4541cf93a0558443580ab5325b7e7e6c92c7706 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Thu, 21 Aug 2025 07:02:43 -0400 Subject: [PATCH 16/43] style: disable more ruff --- lab/show_pyc.py | 2 ++ tests/test_oddball.py | 2 +- tests/test_testing.py | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git 
a/lab/show_pyc.py b/lab/show_pyc.py index a044eca2c..777f9238b 100644 --- a/lab/show_pyc.py +++ b/lab/show_pyc.py @@ -51,6 +51,7 @@ def show_py_text(text, fname=""): code = compile(text, fname, "exec", dont_inherit=True) show_code(code) +# fmt:off CO_FLAGS = [ ('CO_OPTIMIZED', 0x00001), ('CO_NEWLOCALS', 0x00002), @@ -91,6 +92,7 @@ def show_py_text(text, fname=""): CO_FLAGS += [ ('CO_NO_MONITORING_EVENTS', 0x2000000), ] +# fmt: on def show_code(code, indent='', number=None): label = "" diff --git a/tests/test_oddball.py b/tests/test_oddball.py index 8e527dac7..d7b592d24 100644 --- a/tests/test_oddball.py +++ b/tests/test_oddball.py @@ -136,7 +136,7 @@ def recur(n): self.start_import_stop(cov, "recur") assert cov._collector is not None - pytrace = (cov._collector.tracer_name() == "PyTracer") + pytrace = (cov._collector.tracer_name() == "PyTracer") # fmt: skip expected_missing = [4] if pytrace: # pragma: no metacov expected_missing += [10, 11, 12] diff --git a/tests/test_testing.py b/tests/test_testing.py index f6eaa1f00..53e5c454f 100644 --- a/tests/test_testing.py +++ b/tests/test_testing.py @@ -451,7 +451,7 @@ def test_all_our_source_files() -> None: # Twas brillig and the slithy toves i = 0 for i, (source_file, source) in enumerate(all_our_source_files(), start=1): - has_toves = (source_file.name == "test_testing.py") + has_toves = (source_file.name == "test_testing.py") # fmt: skip assert (("# Twas brillig " + "and the slithy toves") in source) == has_toves assert len(source) > 190 # tests/__init__.py is shortest at 196 assert 120 < i < 200 # currently 125 files From 8f06c4204e28afe8ede6fa1433e31dc3e4579b2a Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Thu, 21 Aug 2025 07:43:48 -0400 Subject: [PATCH 17/43] style: use a helper for long help strings --- coverage/cmdline.py | 184 +++++++++++++++++++++++++++----------------- doc/cmd.rst | 4 +- 2 files changed, 115 insertions(+), 73 deletions(-) diff --git a/coverage/cmdline.py b/coverage/cmdline.py index a5af29078..d67ac45fc 100644 --- a/coverage/cmdline.py +++ b/coverage/cmdline.py @@ -32,6 +32,12 @@ # When adding to this file, alphabetization is important. Look for # "alphabetize" comments throughout. + +def oneline(text: str) -> str: + """Turn a multi-line string into one line for help to reformat nicely.""" + return " ".join(text.split()) + + class Opts: """A namespace class for individual options we'll build parsers from.""" @@ -48,9 +54,11 @@ class Opts: ) concurrency = optparse.make_option( "", "--concurrency", action="store", metavar="LIBS", - help=( - "Properly measure code using a concurrency library. " + - "Valid values are: {}, or a comma-list of them." + help=oneline( + """ + Properly measure code using a concurrency library. + Valid values are: {}, or a comma-list of them. + """ ).format(", ".join(sorted(CoverageConfig.CONCURRENCY_CHOICES))), ) context = optparse.make_option( @@ -59,30 +67,38 @@ class Opts: ) contexts = optparse.make_option( "", "--contexts", action="store", metavar="REGEX1,REGEX2,...", - help=( - "Only display data from lines covered in the given contexts. " + - "Accepts Python regexes, which must be quoted." + help=oneline( + """ + Only display data from lines covered in the given contexts. + Accepts Python regexes, which must be quoted. + """ ), ) datafile = optparse.make_option( "", "--data-file", action="store", metavar="DATAFILE", - help=( - "Base name of the data files to operate on. " + - "Defaults to '.coverage'. 
[env: COVERAGE_FILE]" + help=oneline( + """ + Base name of the data files to operate on. + Defaults to '.coverage'. [env: COVERAGE_FILE] + """ ), ) datafle_input = optparse.make_option( "", "--data-file", action="store", metavar="INFILE", - help=( - "Read coverage data for report generation from this file. " + - "Defaults to '.coverage'. [env: COVERAGE_FILE]" + help=oneline( + """ + Read coverage data for report generation from this file. + Defaults to '.coverage'. [env: COVERAGE_FILE] + """ ), ) datafile_output = optparse.make_option( "", "--data-file", action="store", metavar="OUTFILE", - help=( - "Write the recorded coverage data to this file. " + - "Defaults to '.coverage'. [env: COVERAGE_FILE]" + help=oneline( + """ + Write the recorded coverage data to this file. + Defaults to '.coverage'. [env: COVERAGE_FILE] + """ ), ) debug = optparse.make_option( @@ -111,9 +127,11 @@ class Opts: ) include = optparse.make_option( "", "--include", action="store", metavar="PAT1,PAT2,...", - help=( - "Include only files whose paths match one of these patterns. " + - "Accepts shell-style wildcards, which must be quoted." + help=oneline( + """ + Include only files whose paths match one of these patterns. + Accepts shell-style wildcards, which must be quoted. + """ ), ) keep = optparse.make_option( @@ -122,9 +140,11 @@ class Opts: ) pylib = optparse.make_option( "-L", "--pylib", action="store_true", - help=( - "Measure coverage even inside the Python installed library, " + - "which isn't done by default." + help=oneline( + """ + Measure coverage even inside the Python installed library, + which isn't done by default. + """ ), ) show_missing = optparse.make_option( @@ -133,16 +153,20 @@ class Opts: ) module = optparse.make_option( "-m", "--module", action="store_true", - help=( - " is an importable Python module, not a script path, " + - "to be run as 'python -m' would run it." + help=oneline( + """ + is an importable Python module, not a script path, + to be run as 'python -m' would run it. + """ ), ) omit = optparse.make_option( "", "--omit", action="store", metavar="PAT1,PAT2,...", - help=( - "Omit files whose paths match one of these patterns. " + - "Accepts shell-style wildcards, which must be quoted." + help=oneline( + """ + Omit files whose paths match one of these patterns. + Accepts shell-style wildcards, which must be quoted. + """ ), ) output_xml = optparse.make_option( @@ -163,16 +187,20 @@ class Opts: ) parallel_mode = optparse.make_option( "-p", "--parallel-mode", action="store_true", - help=( - "Append a unique suffix to the data file name to collect separate " + - "data from multiple processes." + help=oneline( + """ + Append a unique suffix to the data file name to collect separate + data from multiple processes. + """ ), ) precision = optparse.make_option( "", "--precision", action="store", metavar="N", type=int, - help=( - "Number of digits after the decimal point to display for " + - "reported coverage percentages." + help=oneline( + """ + Number of digits after the decimal point to display for + reported coverage percentages. + """ ), ) quiet = optparse.make_option( @@ -181,18 +209,22 @@ class Opts: ) rcfile = optparse.make_option( "", "--rcfile", action="store", - help=( - "Specify configuration file. " + - "By default '.coveragerc', 'setup.cfg', 'tox.ini', and " + - "'pyproject.toml' are tried. [env: COVERAGE_RCFILE]" + help=oneline( + """ + Specify configuration file. + By default '.coveragerc', 'setup.cfg', 'tox.ini', and + 'pyproject.toml' are tried. 
[env: COVERAGE_RCFILE] + """ ), ) save_signal = optparse.make_option( "", "--save-signal", action="store", metavar="SIGNAL", choices = ["USR1", "USR2"], - help=( - "Specify a signal that will trigger coverage to write its collected data. " + - "Supported values are: USR1, USR2. Not available on Windows." + help=oneline( + """ + Specify a signal that will trigger coverage to write its collected data. + Supported values are: USR1, USR2. Not available on Windows. + """ ), ) show_contexts = optparse.make_option( @@ -213,9 +245,11 @@ class Opts: ) sort = optparse.make_option( "--sort", action="store", metavar="COLUMN", - help=( - "Sort the report by the named column: name, stmts, miss, branch, brpart, or cover. " + - "Default is name." + help=oneline( + """ + Sort the report by the named column: name, stmts, miss, branch, brpart, or cover. + Default is name. + """ ), ) source = optparse.make_option( @@ -383,9 +417,11 @@ def get_prog_name(self) -> str: Opts.omit, ] + GLOBAL_ARGS, usage="[options] [modules]", - description=( - "Make annotated copies of the given files, marking statements that are executed " + - "with > and statements that are missed with !." + description=oneline( + """ + Make annotated copies of the given files, marking statements that are executed + with > and statements that are missed with !. + """ ), ), @@ -398,28 +434,31 @@ def get_prog_name(self) -> str: Opts.quiet, ] + GLOBAL_ARGS, usage="[options] ... ", - description=( - "Combine data from multiple coverage files. " + - "The combined results are written to a single " + - "file representing the union of the data. The positional " + - "arguments are data files or directories containing data files. " + - "If no paths are provided, data files in the default data file's " + - "directory are combined." + description=oneline( + """ + Combine data from multiple coverage files. + The combined results are written to a single + file representing the union of the data. The positional + arguments are data files or directories containing data files. + If no paths are provided, data files in the default data file's + directory are combined. + """ ), ), - "debug": CmdOptionParser( "debug", GLOBAL_ARGS, usage="", - description=( - "Display information about the internals of coverage.py, " + - "for diagnosing problems. " + - "Topics are: " + - "'data' to show a summary of the collected data; " + - "'sys' to show installation information; " + - "'config' to show the configuration; " + - "'premain' to show what is calling coverage; " + - "'pybehave' to show internal flags describing Python behavior." + description=oneline( + """ + Display information about the internals of coverage.py, + for diagnosing problems. + Topics are: + 'data' to show a summary of the collected data; + 'sys' to show installation information; + 'config' to show the configuration; + 'premain' to show what is calling coverage; + 'pybehave' to show internal flags describing Python behavior. + """ ), ), @@ -456,10 +495,12 @@ def get_prog_name(self) -> str: Opts.title, ] + GLOBAL_ARGS, usage="[options] [modules]", - description=( - "Create an HTML report of the coverage of the files. " + - "Each file gets its own page, with the source decorated to show " + - "executed, excluded, and missed lines." + description=oneline( + """ + Create an HTML report of the coverage of the files. + Each file gets its own page, with the source decorated to show + executed, excluded, and missed lines. 
+ """ ), ), @@ -968,10 +1009,11 @@ def unglob_args(args: list[str]) -> list[str]: Use "{program_name} help " for detailed help on any command. """, - - "minimum_help": ( - "Code coverage for Python, version {__version__} {extension_modifier}. " + - "Use '{program_name} help' for help." + "minimum_help": oneline( + """ + Code coverage for Python, version {__version__} {extension_modifier}. + Use '{program_name} help' for help. + """ ), "version": "Coverage.py, version {__version__} {extension_modifier}", diff --git a/doc/cmd.rst b/doc/cmd.rst index b2238be9f..68ec406ad 100644 --- a/doc/cmd.rst +++ b/doc/cmd.rst @@ -677,7 +677,7 @@ Click the keyboard icon in the upper right to see the complete list. $ coverage html --help Usage: coverage html [options] [modules] - Create an HTML report of the coverage of the files. Each file gets its own + Create an HTML report of the coverage of the files. Each file gets its own page, with the source decorated to show executed, excluded, and missed lines. Options: @@ -712,7 +712,7 @@ Click the keyboard icon in the upper right to see the complete list. --rcfile=RCFILE Specify configuration file. By default '.coveragerc', 'setup.cfg', 'tox.ini', and 'pyproject.toml' are tried. [env: COVERAGE_RCFILE] -.. [[[end]]] (sum: 46Gm4krZsw) +.. [[[end]]] (sum: DwG6DxRZIf) The title of the report can be set with the ``title`` setting in the ``[html]`` section of the configuration file, or the ``--title`` switch on From 0a7b73346bc498a089df8cc7d4168dab7a326a06 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Thu, 21 Aug 2025 08:00:18 -0400 Subject: [PATCH 18/43] refactor: remove unused things from lab/ --- lab/bpo_prelude.py | 12 -- lab/branch_trace.py | 17 --- lab/coverage-03.dtd | 55 --------- lab/find_class.py | 40 ------- lab/genpy.py | 261 ------------------------------------------ lab/new-data.js | 75 ------------ tests/test_testing.py | 4 +- 7 files changed, 2 insertions(+), 462 deletions(-) delete mode 100644 lab/bpo_prelude.py delete mode 100644 lab/branch_trace.py delete mode 100644 lab/coverage-03.dtd delete mode 100644 lab/find_class.py delete mode 100644 lab/genpy.py delete mode 100644 lab/new-data.js diff --git a/lab/bpo_prelude.py b/lab/bpo_prelude.py deleted file mode 100644 index 525972ad3..000000000 --- a/lab/bpo_prelude.py +++ /dev/null @@ -1,12 +0,0 @@ -import linecache, sys - -def trace(frame, event, arg): - # The weird globals here is to avoid a NameError on shutdown... 
- if frame.f_code.co_filename == globals().get("__file__"): - lineno = frame.f_lineno - line = linecache.getline(__file__, lineno).rstrip() - print("{} {}: {}".format(event[:4], lineno, line)) - return trace - -print(sys.version) -sys.settrace(trace) diff --git a/lab/branch_trace.py b/lab/branch_trace.py deleted file mode 100644 index c2623c477..000000000 --- a/lab/branch_trace.py +++ /dev/null @@ -1,17 +0,0 @@ -import sys - -pairs = set() -last = -1 - -def trace(frame, event, arg): - global last - if event == "line": - this = frame.f_lineno - pairs.add((last, this)) - last = this - return trace - -code = open(sys.argv[1], encoding="utf-8").read() -sys.settrace(trace) -exec(code) -print(sorted(pairs)) diff --git a/lab/coverage-03.dtd b/lab/coverage-03.dtd deleted file mode 100644 index 8a3f8c227..000000000 --- a/lab/coverage-03.dtd +++ /dev/null @@ -1,55 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/lab/find_class.py b/lab/find_class.py deleted file mode 100644 index b8ab437b1..000000000 --- a/lab/find_class.py +++ /dev/null @@ -1,40 +0,0 @@ -class Parent: - def meth(self): - print("METH") - -class Child(Parent): - pass - -def trace(frame, event, args): - # Thanks to Aleksi Torhamo for code and idea. - co = frame.f_code - fname = co.co_name - if not co.co_varnames: - return - locs = frame.f_locals - first_arg = co.co_varnames[0] - if co.co_argcount: - self = locs[first_arg] - elif co.co_flags & 0x04: # *args syntax - self = locs[first_arg][0] - else: - return - - func = getattr(self, fname).__func__ - if hasattr(func, '__qualname__'): - qname = func.__qualname__ - else: - for cls in self.__class__.__mro__: - f = cls.__dict__.get(fname, None) - if f is None: - continue - if f is func: - qname = cls.__name__ + "." 
+ fname - break - print(f"{event}: {self}.{fname} {qname}") - return trace - -import sys -sys.settrace(trace) - -Child().meth() diff --git a/lab/genpy.py b/lab/genpy.py deleted file mode 100644 index f88e70ca8..000000000 --- a/lab/genpy.py +++ /dev/null @@ -1,261 +0,0 @@ -"""Generate random Python for testing.""" - -import collections -from itertools import cycle, product -import random -import re - -from coverage.parser import PythonParser - - -class PythonSpinner: - """Spin Python source from a simple AST.""" - - def __init__(self): - self.lines = [] - self.lines.append("async def func():") - self.indent = 4 - - @property - def lineno(self): - return len(self.lines) + 1 - - @classmethod - def generate_python(cls, ast): - spinner = cls() - spinner.gen_python_internal(ast) - return "\n".join(spinner.lines) - - def add_line(self, line): - g = f"g{self.lineno}" - self.lines.append(' ' * self.indent + line.format(g=g, lineno=self.lineno)) - - def add_block(self, node): - self.indent += 4 - self.gen_python_internal(node) - self.indent -= 4 - - def maybe_block(self, node, nodei, keyword): - if len(node) > nodei and node[nodei] is not None: - self.add_line(keyword + ":") - self.add_block(node[nodei]) - - def gen_python_internal(self, ast): - for node in ast: - if isinstance(node, list): - op = node[0] - if op == "if": - self.add_line("if {g}:") - self.add_block(node[1]) - self.maybe_block(node, 2, "else") - elif op == "for": - self.add_line("for x in {g}:") - self.add_block(node[1]) - self.maybe_block(node, 2, "else") - elif op == "while": - self.add_line("while {g}:") - self.add_block(node[1]) - self.maybe_block(node, 2, "else") - elif op == "try": - self.add_line("try:") - self.add_block(node[1]) - # 'except' clauses are different, because there can be any - # number. - if len(node) > 2 and node[2] is not None: - for except_node in node[2]: - self.add_line(f"except Exception{self.lineno}:") - self.add_block(except_node) - self.maybe_block(node, 3, "else") - self.maybe_block(node, 4, "finally") - elif op == "with": - self.add_line("with {g} as x:") - self.add_block(node[1]) - else: - raise Exception(f"Bad list node: {node!r}") - else: - op = node - if op == "assign": - self.add_line("x = {lineno}") - elif op in ["break", "continue"]: - self.add_line(op) - elif op == "return": - self.add_line("return") - elif op == "yield": - self.add_line("yield {lineno}") - else: - raise Exception(f"Bad atom node: {node!r}") - - -def weighted_choice(rand, choices): - """Choose from a list of [(choice, weight), ...] 
options, randomly.""" - total = sum(w for c, w in choices) - r = rand.uniform(0, total) - upto = 0 - for c, w in choices: - if upto + w >= r: - return c - upto += w - assert False, "Shouldn't get here" - - -class RandomAstMaker: - def __init__(self, seed=None): - self.r = random.Random() - if seed is not None: - self.r.seed(seed) - self.depth = 0 - self.bc_allowed = set() - - def roll(self, prob=0.5): - return self.r.random() <= prob - - def choose(self, choices): - """Roll the dice to choose an option.""" - return weighted_choice(self.r, choices) - - STMT_CHOICES = [ - [("if", 10), ("for", 10), ("try", 10), ("while", 3), ("with", 10), ("assign", 20), ("return", 1), ("yield", 0)], - [("if", 10), ("for", 10), ("try", 10), ("while", 3), ("with", 10), ("assign", 40), ("return", 1), ("yield", 0), ("break", 10), ("continue", 10)], - [("if", 10), ("for", 10), ("try", 10), ("while", 3), ("with", 10), ("assign", 40), ("return", 1), ("yield", 0), ("break", 10), ("continue", 10)], - [("if", 10), ("for", 10), ("try", 10), ("while", 3), ("with", 10), ("assign", 40), ("return", 1), ("yield", 0), ("break", 10), ("continue", 10)], - [("if", 10), ("for", 10), ("try", 10), ("while", 3), ("with", 10), ("assign", 40), ("return", 1), ("yield", 0), ("break", 10), ("continue", 10)], - # Last element has to have no compound statements, to limit depth. - [("assign", 10), ("return", 1), ("yield", 0), ("break", 10), ("continue", 10)], - ] - - def make_body(self, parent): - body = [] - choices = self.STMT_CHOICES[self.depth] - - self.depth += 1 - nstmts = self.choose([(1, 10), (2, 25), (3, 10), (4, 10), (5, 5)]) - for _ in range(nstmts): - stmt = self.choose(choices) - if stmt == "if": - body.append(["if", self.make_body("if")]) - if self.roll(): - body[-1].append(self.make_body("ifelse")) - elif stmt == "for": - old_allowed = self.bc_allowed - self.bc_allowed = self.bc_allowed | {"break", "continue"} - body.append(["for", self.make_body("for")]) - self.bc_allowed = old_allowed - if self.roll(): - body[-1].append(self.make_body("forelse")) - elif stmt == "while": - old_allowed = self.bc_allowed - self.bc_allowed = self.bc_allowed | {"break", "continue"} - body.append(["while", self.make_body("while")]) - self.bc_allowed = old_allowed - if self.roll(): - body[-1].append(self.make_body("whileelse")) - elif stmt == "try": - else_clause = self.make_body("try") if self.roll() else None - old_allowed = self.bc_allowed - self.bc_allowed = self.bc_allowed - {"continue"} - finally_clause = self.make_body("finally") if self.roll() else None - self.bc_allowed = old_allowed - if else_clause: - with_exceptions = True - elif not else_clause and not finally_clause: - with_exceptions = True - else: - with_exceptions = self.roll() - if with_exceptions: - num_exceptions = self.choose([(1, 50), (2, 50)]) - exceptions = [self.make_body("except") for _ in range(num_exceptions)] - else: - exceptions = None - body.append( - ["try", self.make_body("tryelse"), exceptions, else_clause, finally_clause] - ) - elif stmt == "with": - body.append(["with", self.make_body("with")]) - elif stmt == "return": - body.append(stmt) - break - elif stmt == "yield": - body.append("yield") - elif stmt in ["break", "continue"]: - if stmt in self.bc_allowed: - # A break or continue immediately after a loop is not - # interesting. So if we are immediately after a loop, then - # insert an assignment. 
- if not body and (parent in ["for", "while"]): - body.append("assign") - body.append(stmt) - break - else: - stmt = "assign" - - if stmt == "assign": - # Don't put two assignments in a row, there's no point. - if not body or body[-1] != "assign": - body.append("assign") - - self.depth -= 1 - return body - - -def async_alternatives(source): - parts = re.split(r"(for |with )", source) - nchoices = len(parts) // 2 - #print("{} choices".format(nchoices)) - - def constant(s): - return [s] - - def maybe_async(s): - return [s, "async "+s] - - choices = [f(x) for f, x in zip(cycle([constant, maybe_async]), parts)] - for result in product(*choices): - source = "".join(result) - yield source - - -def compare_alternatives(source): - all_all_arcs = collections.defaultdict(list) - for i, alternate_source in enumerate(async_alternatives(source)): - parser = PythonParser(alternate_source) - arcs = parser.arcs() - all_all_arcs[tuple(arcs)].append((i, alternate_source)) - - return len(all_all_arcs) - - -def show_a_bunch(): - longest = "" - for i in range(100): - maker = RandomAstMaker(i) - source = PythonSpinner.generate_python(maker.make_body("def")) - try: - print("-"*80, "\n", source, sep="") - compile(source, "", "exec", dont_inherit=True) - except Exception as ex: - print(f"Oops: {ex}\n{source}") - if len(source) > len(longest): - longest = source - - -def show_alternatives(): - for i in range(1000): - maker = RandomAstMaker(i) - source = PythonSpinner.generate_python(maker.make_body("def")) - nlines = len(source.splitlines()) - if nlines < 15: - nalt = compare_alternatives(source) - if nalt > 1: - print(f"--- {nlines:3} lines, {nalt:2} alternatives ---------") - print(source) - - - -def show_one(): - maker = RandomAstMaker() - source = PythonSpinner.generate_python(maker.make_body("def")) - print(source) - -if __name__ == "__main__": - show_one() - #show_alternatives() diff --git a/lab/new-data.js b/lab/new-data.js deleted file mode 100644 index 9cb8f715b..000000000 --- a/lab/new-data.js +++ /dev/null @@ -1,75 +0,0 @@ -{ - // As of now: - "lines": { - "a/b/c.py": [1, 2, 3, 4, 5], - "a/b/d.py": [4, 5, 6, 7, 8], - }, - "arcs": { - "a/b/c.py: [[1, 2], [2, 3], [4, 5]], - }, - "file_tracers": { - "a/b/c.py": "fooey.plugin", - }, - - // We used to do this, but it got too bulky, removed in 4.0.1: - "run" { - "collector": "coverage.py 4.0", - "config": { - "branch": true, - "source": ".", - }, - "collected": "20150711T090600", - }, - - // Maybe in the future? - "files": { - "a/b/c.py": { - "lines": [1, 2, 3, 4, 5], - "arcs": [ - [1, 2], [3, 4], [5, -1], - ], - - "plugin": "django.coverage", - - "lines": { - "1": { - "tests": [ - "foo/bar/test.py:TheTest.test_it", - "asdasdasd", - ], - "tests": [17, 34, 23, 12389], - }, - "2": { - "count": 23, - }, - "3": {}, - "4": {}, - "17": {}, - }, - - "arcs": { - "1.2": {}, - "2.3": {}, - "3.-1": {}, - }, - }, - }, - - "tests": [ - { - "file": "a/b/c.py", - "test": "test_it", - }, - { - "file": "a/b/d.py", - "test": "TheTest.test_it", - }, - ], - - "runs": [ - { - // info about each run? - }, - { ... 
}, - ], -} diff --git a/tests/test_testing.py b/tests/test_testing.py index 53e5c454f..c026d536d 100644 --- a/tests/test_testing.py +++ b/tests/test_testing.py @@ -453,5 +453,5 @@ def test_all_our_source_files() -> None: for i, (source_file, source) in enumerate(all_our_source_files(), start=1): has_toves = (source_file.name == "test_testing.py") # fmt: skip assert (("# Twas brillig " + "and the slithy toves") in source) == has_toves - assert len(source) > 190 # tests/__init__.py is shortest at 196 - assert 120 < i < 200 # currently 125 files + assert len(source) > 190 # tests/__init__.py is shortest at 196 + assert 100 < i < 140 # currently 120 files From 82467f72306efdb207af09ace27b6b3ed4c7ad6f Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Thu, 21 Aug 2025 08:00:43 -0400 Subject: [PATCH 19/43] chore: `ruff format .` --- benchmark/benchmark.py | 80 +- benchmark/empty.py | 8 +- benchmark/fake.py | 6 +- benchmark/run.py | 21 +- ci/comment_on_fixes.py | 8 +- ci/session.py | 1 + coverage/bytecode.py | 4 +- coverage/cmdline.py | 255 ++++-- coverage/collector.py | 19 +- coverage/config.py | 52 +- coverage/control.py | 36 +- coverage/core.py | 3 +- coverage/data.py | 2 +- coverage/debug.py | 57 +- coverage/disposition.py | 1 + coverage/env.py | 11 +- coverage/exceptions.py | 12 + coverage/execfile.py | 21 +- coverage/files.py | 30 +- coverage/html.py | 118 +-- coverage/inorout.py | 31 +- coverage/jsonreport.py | 1 + coverage/lcovreport.py | 22 +- coverage/misc.py | 13 +- coverage/multiproc.py | 14 +- coverage/numbits.py | 4 +- coverage/parser.py | 93 ++- coverage/patch.py | 4 + coverage/phystokens.py | 12 +- coverage/plugin.py | 23 +- coverage/plugin_support.py | 12 +- coverage/python.py | 11 +- coverage/pytracer.py | 49 +- coverage/regions.py | 2 + coverage/report.py | 24 +- coverage/report_core.py | 4 +- coverage/results.py | 36 +- coverage/sqldata.py | 67 +- coverage/sqlitedb.py | 25 +- coverage/sysmon.py | 4 +- coverage/templite.py | 5 +- coverage/tomlconfig.py | 3 + coverage/tracer.pyi | 2 + coverage/types.py | 20 +- coverage/version.py | 5 +- coverage/xmlreport.py | 19 +- doc/cog_helpers.py | 3 +- doc/conf.py | 101 +-- igor.py | 21 +- lab/branches.py | 3 +- lab/extract_code.py | 4 +- lab/goals.py | 11 +- lab/hack_pyc.py | 35 +- lab/parser.py | 67 +- lab/run_sysmon.py | 4 + lab/run_trace.py | 25 +- lab/show_pyc.py | 67 +- tests/conftest.py | 1 + tests/coveragetest.py | 34 +- tests/goldtest.py | 17 +- tests/helpers.py | 27 +- tests/mixins.py | 1 + tests/modules/pkg1/__main__.py | 1 + tests/modules/pkg1/runmod2.py | 1 + tests/modules/pkg1/sub/__main__.py | 1 + tests/modules/pkg1/sub/runmod3.py | 1 + tests/modules/plugins/a_plugin.py | 2 +- tests/modules/plugins/another.py | 3 +- tests/modules/process_test/try_execfile.py | 60 +- tests/modules/runmod1.py | 1 + tests/osinfo.py | 41 +- tests/plugin1.py | 6 +- tests/plugin2.py | 14 +- tests/plugin_config.py | 3 +- tests/select_plugin.py | 6 +- tests/test_annotate.py | 42 +- tests/test_api.py | 374 ++++++--- tests/test_arcs.py | 488 +++++++---- tests/test_bytecode.py | 2 +- tests/test_cmdline.py | 902 ++++++++++++++------- tests/test_collector.py | 16 +- tests/test_concurrency.py | 133 +-- tests/test_config.py | 367 ++++++--- tests/test_context.py | 58 +- tests/test_coverage.py | 611 ++++++++------ tests/test_data.py | 353 ++++---- tests/test_debug.py | 145 ++-- tests/test_execfile.py | 94 ++- tests/test_filereporter.py | 2 +- tests/test_files.py | 566 +++++++------ tests/test_goldtest.py | 46 +- tests/test_html.py | 424 ++++++---- 
tests/test_json.py | 152 ++-- tests/test_lcov.py | 126 ++- tests/test_misc.py | 54 +- tests/test_mixins.py | 12 +- tests/test_numbits.py | 58 +- tests/test_oddball.py | 236 ++++-- tests/test_parser.py | 367 ++++++--- tests/test_phystokens.py | 85 +- tests/test_plugins.py | 491 +++++++---- tests/test_process.py | 529 ++++++++---- tests/test_python.py | 16 +- tests/test_regions.py | 50 +- tests/test_report.py | 336 +++++--- tests/test_report_common.py | 61 +- tests/test_results.py | 171 ++-- tests/test_setup.py | 8 +- tests/test_sqlitedb.py | 9 +- tests/test_templite.py | 153 ++-- tests/test_testing.py | 110 ++- tests/test_venv.py | 158 ++-- tests/test_version.py | 22 +- tests/test_xml.py | 226 +++--- 114 files changed, 6171 insertions(+), 3693 deletions(-) diff --git a/benchmark/benchmark.py b/benchmark/benchmark.py index 726f686ff..665143962 100644 --- a/benchmark/benchmark.py +++ b/benchmark/benchmark.py @@ -109,9 +109,7 @@ def run_command(self, cmd: str) -> str: if proc.returncode != 0: self.print(f"ERROR: command returned {proc.returncode}") - raise Exception( - f"Command failed ({proc.returncode}): {cmd!r}, output was:\n{output}" - ) + raise Exception(f"Command failed ({proc.returncode}): {cmd!r}, output was:\n{output}") return output.strip() @@ -318,9 +316,7 @@ def run_with_coverage(self, env: Env, cov_ver: Coverage) -> float: with self.tweak_coverage_settings(cov_ver.tweaks): self.pre_check(env) # NOTE: Not properly factored, and only used from here. duration = self.run_tox(env, env.pyver.toxenv, "--skip-pkg-install") - self.post_check( - env - ) # NOTE: Not properly factored, and only used from here. + self.post_check(env) # NOTE: Not properly factored, and only used from here. return duration @@ -333,9 +329,7 @@ def run_with_coverage(self, env: Env, cov_ver: Coverage) -> float: raise Exception("This doesn't work because options changed to tweaks") covenv = env.pyver.toxenv + "-cov" # type: ignore[unreachable] self.run_tox(env, covenv, "--notest") - env.shell.run_command( - f".tox/{covenv}/bin/python -m pip install {cov_ver.pip_args}" - ) + env.shell.run_command(f".tox/{covenv}/bin/python -m pip install {cov_ver.pip_args}") if cov_ver.tweaks: replace = ("# reference: https", f"[run]\n{cov_ver.tweaks}\n#") else: @@ -499,9 +493,7 @@ def run_with_coverage(self, env: Env, cov_ver: Coverage) -> float: ) with self.tweak_coverage_settings(cov_ver.tweaks): self.pre_check(env) # NOTE: Not properly factored, and only used here. - duration = self.run_tox( - env, env.pyver.toxenv, "--skip-pkg-install -- --cov" - ) + duration = self.run_tox(env, env.pyver.toxenv, "--skip-pkg-install -- --cov") self.post_check(env) # NOTE: Not properly factored, and only used here. 
return duration @@ -542,9 +534,7 @@ def run_no_coverage(self, env: Env) -> float: def run_with_coverage(self, env: Env, cov_ver: Coverage) -> float: env.shell.run_command(f"{env.python} -m pip install {cov_ver.pip_args}") - env.shell.run_command( - f"{env.python} -m coverage run -m unittest tests.test_suite" - ) + env.shell.run_command(f"{env.python} -m coverage run -m unittest tests.test_suite") duration = env.shell.last_duration report = env.shell.run_command(f"{env.python} -m coverage report --precision=6") print("Results:", report.splitlines()[-1]) @@ -598,19 +588,21 @@ def run_with_coverage(self, env: Env, cov_ver: Coverage) -> float: class ProjectMypy(ToxProject): git_url = "https://github.com/python/mypy" - SLOW_TESTS = " or ".join([ - "PythonCmdline", - "PEP561Suite", - "PythonEvaluation", - "testdaemon", - "StubgenCmdLine", - "StubgenPythonSuite", - "TestRun", - "TestRunMultiFile", - "TestExternal", - "TestCommandLine", - "ErrorStreamSuite", - ]) + SLOW_TESTS = " or ".join( + [ + "PythonCmdline", + "PEP561Suite", + "PythonEvaluation", + "testdaemon", + "StubgenCmdLine", + "StubgenPythonSuite", + "TestRun", + "TestRunMultiFile", + "TestExternal", + "TestCommandLine", + "ErrorStreamSuite", + ] + ) FAST = f"-k 'not ({SLOW_TESTS})'" @@ -713,9 +705,7 @@ def tweak_toml_coverage_settings(toml_file: str, tweaks: TweaksType) -> Iterator class AdHocProject(ProjectToTest): """A standalone program to run locally.""" - def __init__( - self, python_file: str, cur_dir: str | None = None, pip_args: str = "" - ): + def __init__(self, python_file: str, cur_dir: str | None = None, pip_args: str = ""): super().__init__() self.python_file = Path(python_file) if not self.python_file.exists(): @@ -821,9 +811,7 @@ def __init__(self, slug: str = "nocov"): class CoveragePR(Coverage): """A version of coverage.py from a pull request.""" - def __init__( - self, number: int, tweaks: TweaksType = None, env_vars: Env_VarsType = None - ): + def __init__(self, number: int, tweaks: TweaksType = None, env_vars: Env_VarsType = None): url = f"https://github.com/nedbat/coveragepy.git@refs/pull/{number}/merge" url_must_exist(url) super().__init__( @@ -837,9 +825,7 @@ def __init__( class CoverageCommit(Coverage): """A version of coverage.py from a specific commit.""" - def __init__( - self, sha: str, tweaks: TweaksType = None, env_vars: Env_VarsType = None - ): + def __init__(self, sha: str, tweaks: TweaksType = None, env_vars: Env_VarsType = None): url = f"https://github.com/nedbat/coveragepy.git@{sha}" url_must_exist(url) super().__init__( @@ -915,18 +901,11 @@ def load_results(self) -> dict[ResultKey, list[float]]: if self.results_file.exists(): with self.results_file.open("r", encoding="utf-8") as f: data: dict[str, list[float]] = json.load(f) - return { - (k.split()[0], k.split()[1], k.split()[2]): v for k, v in data.items() - } + return {(k.split()[0], k.split()[1], k.split()[2]): v for k, v in data.items()} return {} def run(self, num_runs: int = 3) -> None: - total_runs = ( - len(self.projects) - * len(self.py_versions) - * len(self.cov_versions) - * num_runs - ) + total_runs = len(self.projects) * len(self.py_versions) * len(self.cov_versions) * num_runs total_run_nums = iter(itertools.count(start=1)) all_runs = [] @@ -962,10 +941,7 @@ def run(self, num_runs: int = 3) -> None: for proj, pyver, cov_ver, env in all_runs: result_key = (proj.slug, pyver.slug, cov_ver.slug) total_run_num = next(total_run_nums) - if ( - result_key in self.result_data - and len(self.result_data[result_key]) >= num_runs - ): + if 
result_key in self.result_data and len(self.result_data[result_key]) >= num_runs: print(f"Skipping {result_key} as results already exist.") continue @@ -1100,9 +1076,7 @@ def run_experiment( if any(rslug not in slugs for rslug in ratio_slugs): raise Exception(f"Ratio slug doesn't match a slug: {ratio_slugs}, {slugs}") if set(rows + [column]) != set(DIMENSION_NAMES): - raise Exception( - f"All of these must be in rows or column: {', '.join(DIMENSION_NAMES)}" - ) + raise Exception(f"All of these must be in rows or column: {', '.join(DIMENSION_NAMES)}") print(f"Removing and re-making {PERF_DIR}") remake(PERF_DIR) diff --git a/benchmark/empty.py b/benchmark/empty.py index ca457997c..f9c6e422a 100644 --- a/benchmark/empty.py +++ b/benchmark/empty.py @@ -7,13 +7,9 @@ ], cov_versions=[ Coverage("701", "coverage==7.0.1"), - Coverage( - "701.dynctx", "coverage==7.0.1", [("dynamic_context", "test_function")] - ), + Coverage("701.dynctx", "coverage==7.0.1", [("dynamic_context", "test_function")]), Coverage("702", "coverage==7.0.2"), - Coverage( - "702.dynctx", "coverage==7.0.2", [("dynamic_context", "test_function")] - ), + Coverage("702.dynctx", "coverage==7.0.2", [("dynamic_context", "test_function")]), ], projects=[ EmptyProject("empty", [1.2, 3.4]), diff --git a/benchmark/fake.py b/benchmark/fake.py index 9c268daaa..3227a8ebd 100644 --- a/benchmark/fake.py +++ b/benchmark/fake.py @@ -1,9 +1,11 @@ from benchmark import * + class ProjectSlow(EmptyProject): def __init__(self): super().__init__(slug="slow", fake_durations=[23.9, 24.2]) + class ProjectOdd(EmptyProject): def __init__(self): super().__init__(slug="odd", fake_durations=[10.1, 10.5, 9.9]) @@ -13,7 +15,7 @@ def __init__(self): py_versions=[ Python(3, 10), Python(3, 11), - # Python(3, 12), + # Python(3, 12), ], cov_versions=[ Coverage("753", "coverage==7.5.3"), @@ -27,6 +29,6 @@ def __init__(self): column="pyver", ratios=[ ("11 vs 10", "python3.11", "python3.10"), - # ("12 vs 11", "python3.12", "python3.11"), + # ("12 vs 11", "python3.12", "python3.11"), ], ) diff --git a/benchmark/run.py b/benchmark/run.py index 075196a6d..8a1d69867 100644 --- a/benchmark/run.py +++ b/benchmark/run.py @@ -9,7 +9,7 @@ action="store_true", dest="clean", default=False, - help="Delete the results.json file before running benchmarks" + help="Delete the results.json file before running benchmarks", ) options, args = parser.parse_args() @@ -52,13 +52,9 @@ ], cov_versions=[ Coverage("701", "coverage==7.0.1"), - Coverage( - "701.dynctx", "coverage==7.0.1", [("dynamic_context", "test_function")] - ), + Coverage("701.dynctx", "coverage==7.0.1", [("dynamic_context", "test_function")]), Coverage("702", "coverage==7.0.2"), - Coverage( - "702.dynctx", "coverage==7.0.2", [("dynamic_context", "test_function")] - ), + Coverage("702.dynctx", "coverage==7.0.2", [("dynamic_context", "test_function")]), ], projects=[ ProjectAttrs(), @@ -88,8 +84,7 @@ rows=["cov", "proj"], column="pyver", ratios=[ - (f"3.{b} vs 3.{a}", f"python3.{b}", f"python3.{a}") - for a, b in zip(vers, vers[1:]) + (f"3.{b} vs 3.{a}", f"python3.{b}", f"python3.{a}") for a, b in zip(vers, vers[1:]) ], ) @@ -107,8 +102,8 @@ CoverageSource(slug="sysmon", env_vars={"COVERAGE_CORE": "sysmon"}), ], projects=[ - ProjectPillow(), #"-k test_pickle"), - ProjectPillowBranch(), #"-k test_pickle"), + ProjectPillow(), # "-k test_pickle"), + ProjectPillowBranch(), # "-k test_pickle"), # ProjectSphinx(), # Works, slow # ProjectPygments(), # Doesn't work on 3.14 # ProjectRich(), # Doesn't work @@ -165,8 +160,8 @@ ), ], 
projects=[ - ProjectMashumaro(), # small: "-k ck" - ProjectOperator(), # small: "-k irk" + ProjectMashumaro(), # small: "-k ck" + ProjectOperator(), # small: "-k irk" ], rows=["pyver", "proj"], column="cov", diff --git a/ci/comment_on_fixes.py b/ci/comment_on_fixes.py index 137d6b414..ff497c8e5 100644 --- a/ci/comment_on_fixes.py +++ b/ci/comment_on_fixes.py @@ -16,18 +16,18 @@ # Get the first entry in the changelog: for etitle, sections in changelog.entries().items(): - version = etitle.split()[1] # particular to our title format. + version = etitle.split()[1] # particular to our title format. text = "\n".join(sections) break comment = ( - f"This is now released as part of [coverage {version}]" + - f"(https://pypi.org/project/coverage/{version})." + f"This is now released as part of [coverage {version}]" + + f"(https://pypi.org/project/coverage/{version})." ) print(f"Comment will be:\n\n{comment}\n") repo_owner = sys.argv[1] -url_matches = re.finditer(fr"https://github.com/{repo_owner}/(issues|pull)/(\d+)", text) +url_matches = re.finditer(rf"https://github.com/{repo_owner}/(issues|pull)/(\d+)", text) urls = set((m[0], m[1], m[2]) for m in url_matches) for url, kind, number in urls: diff --git a/ci/session.py b/ci/session.py index bab820d4f..dd86decde 100644 --- a/ci/session.py +++ b/ci/session.py @@ -10,6 +10,7 @@ _SESSIONS = {} + def get_session(env="GITHUB_TOKEN"): """Get a properly authenticated requests Session. diff --git a/coverage/bytecode.py b/coverage/bytecode.py index a61f05284..23c3a6c6f 100644 --- a/coverage/bytecode.py +++ b/coverage/bytecode.py @@ -116,9 +116,7 @@ def branch_trails(code: CodeType) -> TBranchTrails: arc from the original instruction's line to the new source line. """ - the_trails: TBranchTrails = collections.defaultdict( - lambda: collections.defaultdict(set) - ) + the_trails: TBranchTrails = collections.defaultdict(lambda: collections.defaultdict(set)) iwalker = InstructionWalker(code) for inst in iwalker.walk(follow_jumps=False): if not inst.jump_target: diff --git a/coverage/cmdline.py b/coverage/cmdline.py index d67ac45fc..fc4da70c2 100644 --- a/coverage/cmdline.py +++ b/coverage/cmdline.py @@ -45,15 +45,22 @@ class Opts: # appears on the command line. append = optparse.make_option( - "-a", "--append", action="store_true", + "-a", + "--append", + action="store_true", help="Append data to the data file. Otherwise it starts clean each time.", ) branch = optparse.make_option( - "", "--branch", action="store_true", + "", + "--branch", + action="store_true", help="Measure branch coverage in addition to statement coverage.", ) concurrency = optparse.make_option( - "", "--concurrency", action="store", metavar="LIBS", + "", + "--concurrency", + action="store", + metavar="LIBS", help=oneline( """ Properly measure code using a concurrency library. @@ -62,11 +69,17 @@ class Opts: ).format(", ".join(sorted(CoverageConfig.CONCURRENCY_CHOICES))), ) context = optparse.make_option( - "", "--context", action="store", metavar="LABEL", + "", + "--context", + action="store", + metavar="LABEL", help="The context label to record for this coverage run.", ) contexts = optparse.make_option( - "", "--contexts", action="store", metavar="REGEX1,REGEX2,...", + "", + "--contexts", + action="store", + metavar="REGEX1,REGEX2,...", help=oneline( """ Only display data from lines covered in the given contexts. 
@@ -75,7 +88,10 @@ class Opts: ), ) datafile = optparse.make_option( - "", "--data-file", action="store", metavar="DATAFILE", + "", + "--data-file", + action="store", + metavar="DATAFILE", help=oneline( """ Base name of the data files to operate on. @@ -84,7 +100,10 @@ class Opts: ), ) datafle_input = optparse.make_option( - "", "--data-file", action="store", metavar="INFILE", + "", + "--data-file", + action="store", + metavar="INFILE", help=oneline( """ Read coverage data for report generation from this file. @@ -93,7 +112,10 @@ class Opts: ), ) datafile_output = optparse.make_option( - "", "--data-file", action="store", metavar="OUTFILE", + "", + "--data-file", + action="store", + metavar="OUTFILE", help=oneline( """ Write the recorded coverage data to this file. @@ -102,31 +124,51 @@ class Opts: ), ) debug = optparse.make_option( - "", "--debug", action="store", metavar="OPTS", + "", + "--debug", + action="store", + metavar="OPTS", help="Debug options, separated by commas. [env: COVERAGE_DEBUG]", ) directory = optparse.make_option( - "-d", "--directory", action="store", metavar="DIR", + "-d", + "--directory", + action="store", + metavar="DIR", help="Write the output files to DIR.", ) fail_under = optparse.make_option( - "", "--fail-under", action="store", metavar="MIN", type="float", + "", + "--fail-under", + action="store", + metavar="MIN", + type="float", help="Exit with a status of 2 if the total coverage is less than MIN.", ) format = optparse.make_option( - "", "--format", action="store", metavar="FORMAT", + "", + "--format", + action="store", + metavar="FORMAT", help="Output format, either text (default), markdown, or total.", ) help = optparse.make_option( - "-h", "--help", action="store_true", + "-h", + "--help", + action="store_true", help="Get help on this command.", ) ignore_errors = optparse.make_option( - "-i", "--ignore-errors", action="store_true", + "-i", + "--ignore-errors", + action="store_true", help="Ignore errors while reading source files.", ) include = optparse.make_option( - "", "--include", action="store", metavar="PAT1,PAT2,...", + "", + "--include", + action="store", + metavar="PAT1,PAT2,...", help=oneline( """ Include only files whose paths match one of these patterns. @@ -135,11 +177,15 @@ class Opts: ), ) keep = optparse.make_option( - "", "--keep", action="store_true", + "", + "--keep", + action="store_true", help="Keep original coverage files, otherwise they are deleted.", ) pylib = optparse.make_option( - "-L", "--pylib", action="store_true", + "-L", + "--pylib", + action="store_true", help=oneline( """ Measure coverage even inside the Python installed library, @@ -148,11 +194,15 @@ class Opts: ), ) show_missing = optparse.make_option( - "-m", "--show-missing", action="store_true", + "-m", + "--show-missing", + action="store_true", help="Show line numbers of statements in each module that weren't executed.", ) module = optparse.make_option( - "-m", "--module", action="store_true", + "-m", + "--module", + action="store_true", help=oneline( """ is an importable Python module, not a script path, @@ -161,7 +211,10 @@ class Opts: ), ) omit = optparse.make_option( - "", "--omit", action="store", metavar="PAT1,PAT2,...", + "", + "--omit", + action="store", + metavar="PAT1,PAT2,...", help=oneline( """ Omit files whose paths match one of these patterns. 
@@ -170,23 +223,39 @@ class Opts: ), ) output_xml = optparse.make_option( - "-o", "", action="store", dest="outfile", metavar="OUTFILE", + "-o", + "", + action="store", + dest="outfile", + metavar="OUTFILE", help="Write the XML report to this file. Defaults to 'coverage.xml'", ) output_json = optparse.make_option( - "-o", "", action="store", dest="outfile", metavar="OUTFILE", + "-o", + "", + action="store", + dest="outfile", + metavar="OUTFILE", help="Write the JSON report to this file. Defaults to 'coverage.json'", ) output_lcov = optparse.make_option( - "-o", "", action="store", dest="outfile", metavar="OUTFILE", + "-o", + "", + action="store", + dest="outfile", + metavar="OUTFILE", help="Write the LCOV report to this file. Defaults to 'coverage.lcov'", ) json_pretty_print = optparse.make_option( - "", "--pretty-print", action="store_true", + "", + "--pretty-print", + action="store_true", help="Format the JSON for human readers.", ) parallel_mode = optparse.make_option( - "-p", "--parallel-mode", action="store_true", + "-p", + "--parallel-mode", + action="store_true", help=oneline( """ Append a unique suffix to the data file name to collect separate @@ -195,7 +264,11 @@ class Opts: ), ) precision = optparse.make_option( - "", "--precision", action="store", metavar="N", type=int, + "", + "--precision", + action="store", + metavar="N", + type=int, help=oneline( """ Number of digits after the decimal point to display for @@ -204,11 +277,15 @@ class Opts: ), ) quiet = optparse.make_option( - "-q", "--quiet", action="store_true", + "-q", + "--quiet", + action="store_true", help="Don't print messages about what is happening.", ) rcfile = optparse.make_option( - "", "--rcfile", action="store", + "", + "--rcfile", + action="store", help=oneline( """ Specify configuration file. @@ -218,8 +295,11 @@ class Opts: ), ) save_signal = optparse.make_option( - "", "--save-signal", action="store", metavar="SIGNAL", - choices = ["USR1", "USR2"], + "", + "--save-signal", + action="store", + metavar="SIGNAL", + choices=["USR1", "USR2"], help=oneline( """ Specify a signal that will trigger coverage to write its collected data. @@ -228,23 +308,30 @@ class Opts: ), ) show_contexts = optparse.make_option( - "--show-contexts", action="store_true", + "--show-contexts", + action="store_true", help="Show contexts for covered lines.", ) skip_covered = optparse.make_option( - "--skip-covered", action="store_true", + "--skip-covered", + action="store_true", help="Skip files with 100% coverage.", ) no_skip_covered = optparse.make_option( - "--no-skip-covered", action="store_false", dest="skip_covered", + "--no-skip-covered", + action="store_false", + dest="skip_covered", help="Disable --skip-covered.", ) skip_empty = optparse.make_option( - "--skip-empty", action="store_true", + "--skip-empty", + action="store_true", help="Skip files with no code.", ) sort = optparse.make_option( - "--sort", action="store", metavar="COLUMN", + "--sort", + action="store", + metavar="COLUMN", help=oneline( """ Sort the report by the named column: name, stmts, miss, branch, brpart, or cover. 
@@ -253,22 +340,33 @@ class Opts: ), ) source = optparse.make_option( - "", "--source", action="store", metavar="SRC1,SRC2,...", + "", + "--source", + action="store", + metavar="SRC1,SRC2,...", help="A list of directories or importable names of code to measure.", ) timid = optparse.make_option( - "", "--timid", action="store_true", + "", + "--timid", + action="store_true", help="Use the slower Python trace function core.", ) title = optparse.make_option( - "", "--title", action="store", metavar="TITLE", + "", + "--title", + action="store", + metavar="TITLE", help="A text string to use as the title on the HTML.", ) version = optparse.make_option( - "", "--version", action="store_true", + "", + "--version", + action="store_true", help="Display version information and exit.", ) + class CoverageOptionParser(optparse.OptionParser): """Base OptionParser for coverage.py. @@ -320,6 +418,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class OptionParserError(Exception): """Used to stop the optparse error handler ending the process.""" + pass def parse_args_ok(self, args: list[str]) -> tuple[bool, optparse.Values | None, list[str]]: @@ -346,10 +445,12 @@ class GlobalOptionParser(CoverageOptionParser): def __init__(self) -> None: super().__init__() - self.add_options([ - Opts.help, - Opts.version, - ]) + self.add_options( + [ + Opts.help, + Opts.version, + ] + ) class CmdOptionParser(CoverageOptionParser): @@ -380,12 +481,12 @@ def __init__( self.add_options(options) self.cmd = action - def __eq__(self, other: str) -> bool: # type: ignore[override] + def __eq__(self, other: str) -> bool: # type: ignore[override] # A convenience equality, so that I can put strings in unit test # results, and they will compare equal to objects. - return (other == f"") + return other == f"" - __hash__ = None # type: ignore[assignment] + __hash__ = None # type: ignore[assignment] def get_prog_name(self) -> str: """Override of an undocumented function in optparse.OptionParser.""" @@ -394,6 +495,7 @@ def get_prog_name(self) -> str: # Include the sub-command for this parser as part of the command. return f"{program_name} {self.cmd}" + # In lists of Opts, keep them alphabetized by the option names as they appear # on the command line, since these lists determine the order of the options in # the help output. @@ -415,7 +517,8 @@ def get_prog_name(self) -> str: Opts.ignore_errors, Opts.include, Opts.omit, - ] + GLOBAL_ARGS, + ] + + GLOBAL_ARGS, usage="[options] [modules]", description=oneline( """ @@ -424,7 +527,6 @@ def get_prog_name(self) -> str: """ ), ), - "combine": CmdOptionParser( "combine", [ @@ -432,7 +534,8 @@ def get_prog_name(self) -> str: Opts.datafile, Opts.keep, Opts.quiet, - ] + GLOBAL_ARGS, + ] + + GLOBAL_ARGS, usage="[options] ... 
", description=oneline( """ @@ -446,7 +549,8 @@ def get_prog_name(self) -> str: ), ), "debug": CmdOptionParser( - "debug", GLOBAL_ARGS, + "debug", + GLOBAL_ARGS, usage="", description=oneline( """ @@ -461,21 +565,20 @@ def get_prog_name(self) -> str: """ ), ), - "erase": CmdOptionParser( "erase", [ Opts.datafile, - ] + GLOBAL_ARGS, + ] + + GLOBAL_ARGS, description="Erase previously collected coverage data.", ), - "help": CmdOptionParser( - "help", GLOBAL_ARGS, + "help", + GLOBAL_ARGS, usage="[command]", description="Describe how to use coverage.py", ), - "html": CmdOptionParser( "html", [ @@ -493,7 +596,8 @@ def get_prog_name(self) -> str: Opts.no_skip_covered, Opts.skip_empty, Opts.title, - ] + GLOBAL_ARGS, + ] + + GLOBAL_ARGS, usage="[options] [modules]", description=oneline( """ @@ -503,7 +607,6 @@ def get_prog_name(self) -> str: """ ), ), - "json": CmdOptionParser( "json", [ @@ -517,11 +620,11 @@ def get_prog_name(self) -> str: Opts.json_pretty_print, Opts.quiet, Opts.show_contexts, - ] + GLOBAL_ARGS, + ] + + GLOBAL_ARGS, usage="[options] [modules]", description="Generate a JSON report of coverage results.", ), - "lcov": CmdOptionParser( "lcov", [ @@ -532,11 +635,11 @@ def get_prog_name(self) -> str: Opts.output_lcov, Opts.omit, Opts.quiet, - ] + GLOBAL_ARGS, + ] + + GLOBAL_ARGS, usage="[options] [modules]", description="Generate an LCOV report of coverage results.", ), - "report": CmdOptionParser( "report", [ @@ -553,11 +656,11 @@ def get_prog_name(self) -> str: Opts.skip_covered, Opts.no_skip_covered, Opts.skip_empty, - ] + GLOBAL_ARGS, + ] + + GLOBAL_ARGS, usage="[options] [modules]", description="Report coverage statistics on modules.", ), - "run": CmdOptionParser( "run", [ @@ -574,11 +677,11 @@ def get_prog_name(self) -> str: Opts.save_signal, Opts.source, Opts.timid, - ] + GLOBAL_ARGS, + ] + + GLOBAL_ARGS, usage="[options] [program options]", description="Run a Python program, measuring code execution.", ), - "xml": CmdOptionParser( "xml", [ @@ -590,7 +693,8 @@ def get_prog_name(self) -> str: Opts.output_xml, Opts.quiet, Opts.skip_empty, - ] + GLOBAL_ARGS, + ] + + GLOBAL_ARGS, usage="[options] [modules]", description="Generate an XML report of coverage results.", ), @@ -618,7 +722,7 @@ def show_help( # get back to the original form. auto_suffix = "-script.py" if program_name.endswith(auto_suffix): - program_name = program_name[:-len(auto_suffix)] + program_name = program_name[: -len(auto_suffix)] help_params = dict(coverage.__dict__) help_params["__url__"] = __url__ @@ -857,7 +961,7 @@ def do_help( return False def do_signal_save(self, _signum: int, _frame: types.FrameType | None) -> None: - """ Signal handler to save coverage report """ + """Signal handler to save coverage report""" print("Saving coverage data...", flush=True) self.coverage.save() @@ -891,9 +995,9 @@ def do_run(self, options: optparse.Values, args: list[str]) -> int: # they will be None if they have not been specified. if getattr(options, opt_name) is not None: show_help( - "Options affecting multiprocessing must only be specified " + - "in a configuration file.\n" + - f"Remove --{opt_name} from the command line.", + "Options affecting multiprocessing must only be specified " + + "in a configuration file.\n" + + f"Remove --{opt_name} from the command line.", ) return ERR @@ -1015,7 +1119,6 @@ def unglob_args(args: list[str]) -> list[str]: Use '{program_name} help' for help. 
""" ), - "version": "Coverage.py, version {__version__} {extension_modifier}", } @@ -1034,7 +1137,7 @@ def main(argv: list[str] | None = None) -> int | None: # An exception was caught while running the product code. The # sys.exc_info() return tuple is packed into an _ExceptionDuringRun # exception. - traceback.print_exception(*err.args) # pylint: disable=no-value-for-parameter + traceback.print_exception(*err.args) # pylint: disable=no-value-for-parameter status = ERR except _BaseCoverageException as err: # A controlled error inside coverage.py: print the message to the user. @@ -1049,16 +1152,18 @@ def main(argv: list[str] | None = None) -> int | None: status = None return status + # Profiling using ox_profile. Install it from GitHub: # pip install git+https://github.com/emin63/ox_profile.git # # $set_env.py: COVERAGE_PROFILE - Set to use ox_profile. _profile = os.getenv("COVERAGE_PROFILE") -if _profile: # pragma: debugging +if _profile: # pragma: debugging from ox_profile.core.launchers import SimpleLauncher # pylint: disable=import-error + original_main = main - def main( # pylint: disable=function-redefined + def main( # pylint: disable=function-redefined argv: list[str] | None = None, ) -> int | None: """A wrapper around main that profiles.""" diff --git a/coverage/collector.py b/coverage/collector.py index 525c89c2b..59f0938af 100644 --- a/coverage/collector.py +++ b/coverage/collector.py @@ -142,14 +142,17 @@ def __init__( if "greenlet" in concurrencies: tried = "greenlet" import greenlet + self.concur_id_func = greenlet.getcurrent elif "eventlet" in concurrencies: tried = "eventlet" import eventlet.greenthread # pylint: disable=import-error,useless-suppression + self.concur_id_func = eventlet.greenthread.getcurrent elif "gevent" in concurrencies: tried = "gevent" import gevent # pylint: disable=import-error,useless-suppression + self.concur_id_func = gevent.getcurrent if "thread" in concurrencies: @@ -161,7 +164,8 @@ def __init__( if self.concur_id_func and not hasattr(core.tracer_class, "concur_id_func"): raise ConfigError( "Can't support concurrency={} with {}, only threads are supported.".format( - tried, self.tracer_name(), + tried, + self.tracer_name(), ), ) @@ -170,6 +174,7 @@ def __init__( # it's imported early, and the program being measured uses # gevent, then gevent's monkey-patching won't work properly. import threading + self.threading = threading self.reset() @@ -222,6 +227,7 @@ def reset(self) -> None: # FileDisposition will be replaced by None in the cache. if env.PYPY: import __pypy__ # pylint: disable=import-error + # Alex Gaynor said: # should_trace_cache is a strictly growing key: once a key is in # it, it never changes. Further, the keys used to access it are @@ -418,7 +424,7 @@ def disable_plugin(self, disposition: TFileDisposition) -> None: plugin._coverage_enabled = False disposition.trace = False - @functools.cache # pylint: disable=method-cache-max-size-none + @functools.cache # pylint: disable=method-cache-max-size-none def cached_mapped_file(self, filename: str) -> str: """A locally cached version of file names mapped through file_mapper.""" return self.file_mapper(filename) @@ -430,14 +436,14 @@ def mapped_file_dict(self, d: Mapping[str, T]) -> dict[str, T]: # in other threads. We try three times in case of concurrent # access, hoping to get a clean copy. 
runtime_err = None - for _ in range(3): # pragma: part covered + for _ in range(3): # pragma: part covered try: items = list(d.items()) - except RuntimeError as ex: # pragma: cant happen + except RuntimeError as ex: # pragma: cant happen runtime_err = ex else: break - else: # pragma: cant happen + else: # pragma: cant happen assert isinstance(runtime_err, Exception) raise runtime_err @@ -487,8 +493,7 @@ def flush_data(self) -> bool: self.covdata.add_lines(self.mapped_file_dict(line_data)) file_tracers = { - k: v for k, v in self.file_tracers.items() - if v not in self.disabled_plugins + k: v for k, v in self.file_tracers.items() if v not in self.disabled_plugins } self.covdata.add_file_tracers(self.mapped_file_dict(file_tracers)) diff --git a/coverage/config.py b/coverage/config.py index e07fdf752..7cde8ec42 100644 --- a/coverage/config.py +++ b/coverage/config.py @@ -47,7 +47,7 @@ def __init__(self, our_file: bool) -> None: if our_file: self.section_prefixes.append("") - def read( # type: ignore[override] + def read( # type: ignore[override] self, filenames: Iterable[str], encoding_unused: str | None = None, @@ -64,16 +64,16 @@ def real_section(self, section: str) -> str | None: return real_section return None - def has_option(self, section: str, option: str) -> bool: # type: ignore[override] + def has_option(self, section: str, option: str) -> bool: # type: ignore[override] real_section = self.real_section(section) if real_section is not None: return super().has_option(real_section, option) return False - def has_section(self, section: str) -> bool: # type: ignore[override] + def has_section(self, section: str) -> bool: # type: ignore[override] return bool(self.real_section(section)) - def options(self, section: str) -> list[str]: # type: ignore[override] + def options(self, section: str) -> list[str]: # type: ignore[override] real_section = self.real_section(section) if real_section is not None: return super().options(real_section) @@ -86,7 +86,7 @@ def get_section(self, section: str) -> TConfigSectionOut: d[opt] = self.get(section, opt) return d - def get(self, section: str, option: str, *args: Any, **kwargs: Any) -> str: # type: ignore + def get(self, section: str, option: str, *args: Any, **kwargs: Any) -> str: # type: ignore """Get a value, replacing environment variables also. The arguments are the same as `ConfigParser.get`, but in the found @@ -174,6 +174,7 @@ class CoverageConfig(TConfigurable, TPluginConfig): operation of coverage.py. 
""" + # pylint: disable=too-many-instance-attributes def __init__(self) -> None: @@ -263,14 +264,20 @@ def __init__(self) -> None: self.plugin_options: dict[str, TConfigSectionOut] = {} MUST_BE_LIST = { - "debug", "concurrency", "plugins", - "report_omit", "report_include", - "run_omit", "run_include", + "debug", + "concurrency", + "plugins", + "report_omit", + "report_include", + "run_omit", + "run_include", "patch", } SERIALIZE_ABSPATH = { - "data_file", "debug_file", "source_dirs", + "data_file", + "debug_file", + "source_dirs", } def from_args(self, **kwargs: TConfigValueIn) -> None: @@ -333,7 +340,9 @@ def from_file(self, filename: str, warn: Callable[[str], None], our_file: bool) for unknown in set(cp.options(section)) - options: warn( "Unrecognized option '[{}] {}=' in config file {}".format( - real_section, unknown, filename, + real_section, + unknown, + filename, ), ) @@ -369,7 +378,11 @@ def copy(self) -> CoverageConfig: return copy.deepcopy(self) CONCURRENCY_CHOICES: Final[set[str]] = { - "thread", "gevent", "greenlet", "eventlet", "multiprocessing" + "thread", + "gevent", + "greenlet", + "eventlet", + "multiprocessing", } CONFIG_FILE_OPTIONS = [ @@ -444,7 +457,7 @@ def copy(self) -> CoverageConfig: # # [lcov] ("lcov_output", "lcov:output", "file"), - ("lcov_line_checksums", "lcov:line_checksums", "boolean") + ("lcov_line_checksums", "lcov:line_checksums", "boolean"), ] def _set_attr_from_config_option( @@ -501,7 +514,7 @@ def set_option(self, option_name: str, value: TConfigValueIn | TConfigSectionIn) # See if it's a plugin option. plugin_name, _, key = option_name.partition(":") if key and plugin_name in self.plugins: - self.plugin_options.setdefault(plugin_name, {})[key] = value # type: ignore[index] + self.plugin_options.setdefault(plugin_name, {})[key] = value # type: ignore[index] return # If we get here, we didn't find the option. @@ -537,10 +550,7 @@ def get_option(self, option_name: str) -> TConfigValueOut | None: def post_process(self) -> None: """Make final adjustments to settings to make them usable.""" - self.paths = { - k: [process_file_value(f) for f in v] - for k, v in self.paths.items() - } + self.paths = {k: [process_file_value(f) for f in v] for k, v in self.paths.items()} self.exclude_list += self.exclude_also self.partial_list += self.partial_also @@ -550,9 +560,7 @@ def post_process(self) -> None: def debug_info(self) -> list[tuple[str, Any]]: """Make a list of (name, value) pairs for writing debug info.""" - return human_sorted_items( - (k, v) for k, v in self.__dict__.items() if not k.startswith("_") - ) + return human_sorted_items((k, v) for k, v in self.__dict__.items() if not k.startswith("_")) def serialize(self) -> str: """Convert to a string that can be ingested with `deserialize_config`. @@ -560,7 +568,7 @@ def serialize(self) -> str: File paths used by `coverage run` are made absolute to ensure the deserialized config will refer to the same files. """ - data = {k:v for k, v in self.__dict__.items() if not k.startswith("_")} + data = {k: v for k, v in self.__dict__.items() if not k.startswith("_")} for k in self.SERIALIZE_ABSPATH: v = data[k] if isinstance(v, list): @@ -601,7 +609,7 @@ def config_files_to_try(config_file: bool | str) -> list[tuple[str, bool, bool]] # True, so make it so. if config_file == ".coveragerc": config_file = True - specified_file = (config_file is not True) + specified_file = config_file is not True if not specified_file: # No file was specified. Check COVERAGE_RCFILE. 
rcfile = os.getenv("COVERAGE_RCFILE") diff --git a/coverage/control.py b/coverage/control.py index 22ecd1652..7ce36af56 100644 --- a/coverage/control.py +++ b/coverage/control.py @@ -71,6 +71,7 @@ os = isolate_module(os) + @contextlib.contextmanager def override_config(cov: Coverage, **kwargs: TConfigValueIn) -> Iterator[None]: """Temporarily tweak the configuration of `cov`. @@ -91,6 +92,7 @@ def override_config(cov: Coverage, **kwargs: TConfigValueIn) -> Iterator[None]: _DEFAULT_DATAFILE = DEFAULT_DATAFILE # Just in case, for backwards compatibility CONFIG_DATA_PREFIX = ":data:" + class Coverage(TConfigurable): """Programmatic access to coverage.py. @@ -136,7 +138,7 @@ def current(cls) -> Coverage | None: else: return None - def __init__( # pylint: disable=too-many-arguments + def __init__( # pylint: disable=too-many-arguments self, data_file: FilePath | DefaultValue | None = DEFAULT_DATAFILE, data_suffix: str | bool | None = None, @@ -318,7 +320,7 @@ def __init__( # pylint: disable=too-many-arguments # Build our configuration from a number of sources. if isinstance(config_file, str) and config_file.startswith(CONFIG_DATA_PREFIX): - self.config = deserialize_config(config_file[len(CONFIG_DATA_PREFIX):]) + self.config = deserialize_config(config_file[len(CONFIG_DATA_PREFIX) :]) else: if not isinstance(config_file, bool): config_file = os.fspath(config_file) @@ -617,8 +619,7 @@ def _init_for_start(self) -> None: self._warn( "Plugin file tracers ({}) aren't supported with {}".format( ", ".join( - plugin._coverage_plugin_name - for plugin in self._plugins.file_tracers + plugin._coverage_plugin_name for plugin in self._plugins.file_tracers ), self._collector.tracer_name(), ), @@ -647,8 +648,9 @@ def _init_for_start(self) -> None: # The Python docs seem to imply that SIGTERM works uniformly even # on Windows, but that's not my experience, and this agrees: # https://stackoverflow.com/questions/35772001/x/35792192#35792192 - self._old_sigterm = signal.signal( # type: ignore[assignment] - signal.SIGTERM, self._on_sigterm, + self._old_sigterm = signal.signal( # type: ignore[assignment] + signal.SIGTERM, + self._on_sigterm, ) def _init_data(self, suffix: str | bool | None) -> None: @@ -728,7 +730,7 @@ def collect(self) -> Iterator[None]: try: yield finally: - self.stop() # pragma: nested + self.stop() # pragma: nested def _atexit(self, event: str = "atexit") -> None: """Clean up on process shutdown.""" @@ -746,8 +748,8 @@ def _on_sigterm(self, signum_unused: int, frame_unused: FrameType | None) -> Non self._atexit("sigterm") # Statements after here won't be seen by metacov because we just wrote # the data, and are about to kill the process. - signal.signal(signal.SIGTERM, self._old_sigterm) # pragma: not covered - os.kill(os.getpid(), signal.SIGTERM) # pragma: not covered + signal.signal(signal.SIGTERM, self._old_sigterm) # pragma: not covered + os.kill(os.getpid(), signal.SIGTERM) # pragma: not covered def erase(self) -> None: """Erase previously collected coverage data. @@ -779,7 +781,7 @@ def switch_context(self, new_context: str) -> None: .. 
versionadded:: 5.0 """ - if not self._started: # pragma: part started + if not self._started: # pragma: part started raise CoverageException("Cannot switch context, coverage is not started") assert self._collector is not None @@ -1029,7 +1031,8 @@ def _get_file_reporter(self, morf: TMorf) -> FileReporter: if file_reporter is None: raise PluginError( "Plugin {!r} did not provide a file reporter for {!r}.".format( - plugin._coverage_plugin_name, morf, + plugin._coverage_plugin_name, + morf, ), ) @@ -1059,7 +1062,7 @@ def _get_file_reporters( # Be sure we have a collection. if not isinstance(morfs, (list, tuple, set)): - morfs = [morfs] # type: ignore[list-item] + morfs = [morfs] # type: ignore[list-item] return [(self._get_file_reporter(morf), morf) for morf in morfs] @@ -1372,7 +1375,8 @@ def plugin_info(plugins: list[Any]) -> list[str]: ("configs_attempted", self.config.config_files_attempted), ("configs_read", self.config.config_files_read), ("config_file", self.config.config_file), - ("config_contents", + ( + "config_contents", repr(self.config._config_contents) if self.config._config_contents else "-none-", ), ("data_file", self._data.data_filename() if self._data is not None else "-none-"), @@ -1401,10 +1405,10 @@ def plugin_info(plugins: list[Any]) -> list[str]: # Mega debugging... # $set_env.py: COVERAGE_DEBUG_CALLS - Lots and lots of output about calls to Coverage. -if int(os.getenv("COVERAGE_DEBUG_CALLS", 0)): # pragma: debugging +if int(os.getenv("COVERAGE_DEBUG_CALLS", 0)): # pragma: debugging from coverage.debug import decorate_methods, show_calls - Coverage = decorate_methods( # type: ignore[misc] + Coverage = decorate_methods( # type: ignore[misc] show_calls(show_args=True), butnot=["get_data"], )(Coverage) @@ -1448,7 +1452,7 @@ def process_startup(*, force: bool = False) -> Coverage | None: return None cov = Coverage(config_file=config_file) - process_startup.coverage = cov # type: ignore[attr-defined] + process_startup.coverage = cov # type: ignore[attr-defined] cov._warn_no_data = False cov._warn_unimported_source = False cov._warn_preimported_source = False diff --git a/coverage/core.py b/coverage/core.py index c5b196788..278faaaa7 100644 --- a/coverage/core.py +++ b/coverage/core.py @@ -25,12 +25,13 @@ try: # Use the C extension code when we can, for speed. import coverage.tracer + CTRACER_FILE: str | None = getattr(coverage.tracer, "__file__", "unknown") except ImportError as imp_err: # Couldn't import the C extension, maybe it isn't built. # We still need to check the environment variable directly here, # as this code runs before configuration is loaded. - if os.getenv("COVERAGE_CORE") == "ctrace": # pragma: part covered + if os.getenv("COVERAGE_CORE") == "ctrace": # pragma: part covered # During testing, we use the COVERAGE_CORE environment variable # to indicate that we've fiddled with the environment to test this # fallback code. 
If we thought we had a C tracer, but couldn't import diff --git a/coverage/data.py b/coverage/data.py index e99b1d2d4..4d5a4a26e 100644 --- a/coverage/data.py +++ b/coverage/data.py @@ -80,7 +80,7 @@ def combinable_files(data_file: str, data_paths: Iterable[str] | None = None) -> if os.path.isfile(p): files_to_combine.append(os.path.abspath(p)) elif os.path.isdir(p): - pattern = glob.escape(os.path.join(os.path.abspath(p), local)) +".*" + pattern = glob.escape(os.path.join(os.path.abspath(p), local)) + ".*" files_to_combine.extend(glob.glob(pattern)) else: raise NoDataError(f"Couldn't combine from non-existent path '{p}'") diff --git a/coverage/debug.py b/coverage/debug.py index 218b913e0..b15f3ea36 100644 --- a/coverage/debug.py +++ b/coverage/debug.py @@ -38,7 +38,7 @@ class DebugControl: """Control and output for debugging.""" - show_repr_attr = False # For auto_repr + show_repr_attr = False # For auto_repr def __init__( self, @@ -73,7 +73,7 @@ def should(self, option: str) -> bool: """Decide whether to output debug information in category `option`.""" if option == "callers" and self.suppress_callers: return False - return (option in self.options) + return option in self.options @contextlib.contextmanager def without_callers(self) -> Iterator[None]: @@ -108,6 +108,7 @@ def write(self, msg: str, *, exc: BaseException | None = None) -> None: class NoDebugging(DebugControl): """A replacement for DebugControl that will never try to do anything.""" + def __init__(self) -> None: # pylint: disable=super-init-not-called pass @@ -128,13 +129,14 @@ def write(self, msg: str, *, exc: BaseException | None = None) -> None: class DevNullDebug(NoDebugging): """A DebugControl that won't write anywhere.""" + def write(self, msg: str, *, exc: BaseException | None = None) -> None: pass def info_header(label: str) -> str: """Make a nice header string.""" - return "--{:-<60s}".format(" "+label+" ") + return "--{:-<60s}".format(" " + label + " ") def info_formatter(info: Iterable[tuple[str, Any]]) -> Iterator[str]: @@ -158,7 +160,7 @@ def info_formatter(info: Iterable[tuple[str, Any]]) -> Iterator[str]: elif isinstance(data, (list, set, tuple)): prefix = "%*s:" % (label_len, label) for e in data: - yield "%*s %s" % (label_len+1, prefix, e) + yield "%*s %s" % (label_len + 1, prefix, e) prefix = "" else: yield "%*s: %s" % (label_len, label, data) @@ -193,20 +195,24 @@ def exc_one_line(exc: Exception) -> str: ] _FILENAME_SUBS: list[tuple[str, str]] = [] + @overload def short_filename(filename: str) -> str: pass + @overload def short_filename(filename: None) -> None: pass + def short_filename(filename: str | None) -> str | None: """Shorten a file name. 
Directories are replaced by prefixes like 'syspath:'""" if not _FILENAME_SUBS: for pathdir in sys.path: _FILENAME_SUBS.append((pathdir, "syspath:")) import coverage + _FILENAME_SUBS.append((os.path.dirname(coverage.__file__), "cov:")) _FILENAME_SUBS.sort(key=(lambda pair: len(pair[0])), reverse=True) if filename is not None: @@ -289,7 +295,7 @@ def short_stack( def dump_stack_frames(out: TWritable, skip: int = 0) -> None: """Print a summary of the stack to `out`.""" - out.write(short_stack(skip=skip+1) + "\n") + out.write(short_stack(skip=skip + 1) + "\n") def clipped_repr(text: str, numchars: int = 50) -> str: @@ -317,10 +323,12 @@ def add_pid_and_tid(text: str) -> str: AUTO_REPR_IGNORE = {"$coverage.object_id"} + def auto_repr(self: Any) -> str: """A function implementing an automatic __repr__ for debugging.""" show_attrs = ( - (k, v) for k, v in self.__dict__.items() + (k, v) + for k, v in self.__dict__.items() if getattr(v, "show_repr_attr", True) and not inspect.ismethod(v) and k not in AUTO_REPR_IGNORE @@ -332,19 +340,19 @@ def auto_repr(self: Any) -> str: ) -def simplify(v: Any) -> Any: # pragma: debugging +def simplify(v: Any) -> Any: # pragma: debugging """Turn things which are nearly dict/list/etc into dict/list/etc.""" if isinstance(v, dict): - return {k:simplify(vv) for k, vv in v.items()} + return {k: simplify(vv) for k, vv in v.items()} elif isinstance(v, (list, tuple)): return type(v)(simplify(vv) for vv in v) elif hasattr(v, "__dict__"): - return simplify({"."+k: v for k, v in v.__dict__.items()}) + return simplify({"." + k: v for k, v in v.__dict__.items()}) else: return v -def pp(v: Any) -> None: # pragma: debugging +def pp(v: Any) -> None: # pragma: debugging """Debug helper to pretty-print data, including SimpleNamespace objects.""" # Might not be needed in 3.9+ pprint.pprint(simplify(v)) @@ -362,7 +370,7 @@ def filter_text(text: str, filters: Iterable[Callable[[str], str]]) -> str: """ clean_text = text.rstrip() - ending = text[len(clean_text):] + ending = text[len(clean_text) :] text = clean_text for filter_fn in filters: lines = [] @@ -374,6 +382,7 @@ def filter_text(text: str, filters: Iterable[Callable[[str], str]]) -> str: class CwdTracker: """A class to add cwd info to debug messages.""" + def __init__(self) -> None: self.cwd: str | None = None @@ -388,6 +397,7 @@ def filter(self, text: str) -> str: class ProcessTracker: """Track process creation for debug logging.""" + def __init__(self) -> None: self.pid: int = os.getpid() self.did_welcome = False @@ -416,6 +426,7 @@ def filter(self, text: str) -> str: class PytestTracker: """Track the current pytest test name to add to debug messages.""" + def __init__(self) -> None: self.test_name: str | None = None @@ -430,6 +441,7 @@ def filter(self, text: str) -> str: class DebugOutputFile: """A file-like object that includes pid and cwd information.""" + def __init__( self, outfile: IO[str] | None, @@ -482,7 +494,7 @@ def get_one( the_one = cls(fileobj, filters) cls._set_singleton_data(the_one, interim) - if not(the_one.filters): + if not (the_one.filters): the_one.filters = list(filters) return the_one @@ -527,10 +539,10 @@ def flush(self) -> None: self.outfile.flush() -def log(msg: str, stack: bool = False) -> None: # pragma: debugging +def log(msg: str, stack: bool = False) -> None: # pragma: debugging """Write a log message as forcefully as possible.""" out = DebugOutputFile.get_one(interim=True) - out.write(msg+"\n") + out.write(msg + "\n") if stack: dump_stack_frames(out=out, skip=1) @@ -539,9 +551,10 @@ def 
decorate_methods( decorator: Callable[..., Any], butnot: Iterable[str] = (), private: bool = False, -) -> Callable[..., Any]: # pragma: debugging +) -> Callable[..., Any]: # pragma: debugging """A class decorator to apply a decorator to methods.""" - def _decorator(cls): # type: ignore[no-untyped-def] + + def _decorator(cls): # type: ignore[no-untyped-def] for name, meth in inspect.getmembers(cls, inspect.isroutine): if name not in cls.__dict__: continue @@ -552,17 +565,21 @@ def _decorator(cls): # type: ignore[no-untype continue setattr(cls, name, decorator(meth)) return cls + return _decorator def break_in_pudb(func: AnyCallable) -> AnyCallable: # pragma: debugging """A function decorator to stop in the debugger for each call.""" + @functools.wraps(func) def _wrapper(*args: Any, **kwargs: Any) -> Any: import pudb + sys.stdout = sys.__stdout__ pudb.set_trace() return func(*args, **kwargs) + return _wrapper @@ -570,12 +587,14 @@ def _wrapper(*args: Any, **kwargs: Any) -> Any: CALLS = itertools.count() OBJ_ID_ATTR = "$coverage.object_id" + def show_calls( show_args: bool = True, show_stack: bool = False, show_return: bool = False, -) -> Callable[..., Any]: # pragma: debugging +) -> Callable[..., Any]: # pragma: debugging """A method decorator to debug-log each call to the function.""" + def _decorator(func: AnyCallable) -> AnyCallable: @functools.wraps(func) def _wrapper(self: Any, *args: Any, **kwargs: Any) -> Any: @@ -604,7 +623,9 @@ def _wrapper(self: Any, *args: Any, **kwargs: Any) -> Any: msg = f"{oid} {callid:04d} {func.__name__} return {ret!r}\n" DebugOutputFile.get_one(interim=True).write(msg) return ret + return _wrapper + return _decorator @@ -639,6 +660,6 @@ def relevant_environment_display(env: Mapping[str, str]) -> list[tuple[str, str] val = re.sub(r"\w", "*", val) if name in TRUNCATE: if len(val) > TRUNCATE_LEN: - val = val[:TRUNCATE_LEN-3] + "..." + val = val[: TRUNCATE_LEN - 3] + "..." to_show.append((name, val)) return human_sorted_items(to_show) diff --git a/coverage/disposition.py b/coverage/disposition.py index 7aa15e97a..c120fd684 100644 --- a/coverage/disposition.py +++ b/coverage/disposition.py @@ -32,6 +32,7 @@ def __repr__(self) -> str: # be implemented in either C or Python. Acting on them is done with these # functions. + def disposition_init(cls: type[TFileDisposition], original_filename: str) -> TFileDisposition: """Construct and initialize a new FileDisposition object.""" disp = cls() diff --git a/coverage/env.py b/coverage/env.py index a2b6c4547..fcd8f3a36 100644 --- a/coverage/env.py +++ b/coverage/env.py @@ -35,7 +35,7 @@ if PYPY: # Minimum now is 7.3.16 - PYPYVERSION = sys.pypy_version_info # type: ignore[attr-defined] + PYPYVERSION = sys.pypy_version_info # type: ignore[attr-defined] else: PYPYVERSION = (0,) @@ -48,6 +48,7 @@ # Should we default to sys.monitoring? SYSMON_DEFAULT = CPYTHON and PYVERSION >= (3, 14) + # Python behavior. class PYBEHAVIOR: """Flags indicating this Python's behavior.""" @@ -170,7 +171,7 @@ class PYBEHAVIOR: # Does sys.monitoring support BRANCH_RIGHT and BRANCH_LEFT? The names # were added in early 3.14 alphas, but didn't work entirely correctly until # after 3.14.0a5. - branch_right_left = (pep669 and (PYVERSION > (3, 14, 0, "alpha", 5, 0))) + branch_right_left = pep669 and (PYVERSION > (3, 14, 0, "alpha", 5, 0)) # Coverage.py specifics, about testing scenarios. See tests/testenv.py also. 
@@ -187,11 +188,11 @@ class PYBEHAVIOR: def debug_info() -> Iterable[tuple[str, Any]]: """Return a list of (name, value) pairs for printing debug information.""" info = [ - (name, value) for name, value in globals().items() + (name, value) + for name, value in globals().items() if not name.startswith("_") and name not in _UNINTERESTING_GLOBALS ] info += [ - (name, value) for name, value in PYBEHAVIOR.__dict__.items() - if not name.startswith("_") + (name, value) for name, value in PYBEHAVIOR.__dict__.items() if not name.startswith("_") ] return sorted(info) diff --git a/coverage/exceptions.py b/coverage/exceptions.py index 09c50d67f..27c823686 100644 --- a/coverage/exceptions.py +++ b/coverage/exceptions.py @@ -8,45 +8,55 @@ class _BaseCoverageException(Exception): """The base-base of all Coverage exceptions.""" + pass class CoverageException(_BaseCoverageException): """The base class of all exceptions raised by Coverage.py.""" + pass class ConfigError(_BaseCoverageException): """A problem with a config file, or a value in one.""" + pass class DataError(CoverageException): """An error in using a data file.""" + pass + class NoDataError(CoverageException): """We didn't have data to work with.""" + pass class NoSource(CoverageException): """We couldn't find the source for a module.""" + pass class NoCode(NoSource): """We couldn't find any code at all.""" + pass class NotPython(CoverageException): """A source file turned out not to be parsable Python.""" + pass class PluginError(CoverageException): """A plugin misbehaved.""" + pass @@ -56,9 +66,11 @@ class _ExceptionDuringRun(CoverageException): Construct it with three arguments, the values from `sys.exc_info`. """ + pass class CoverageWarning(Warning): """A warning from Coverage.py.""" + pass diff --git a/coverage/execfile.py b/coverage/execfile.py index c4b08b4eb..d90f38da3 100644 --- a/coverage/execfile.py +++ b/coverage/execfile.py @@ -26,11 +26,13 @@ PYC_MAGIC_NUMBER = importlib.util.MAGIC_NUMBER + class DummyLoader: """A shim for the pep302 __loader__, emulating pkgutil.ImpLoader. Currently only implements the .fullname attribute """ + def __init__(self, fullname: str, *_args: Any) -> None: self.fullname = fullname @@ -56,8 +58,8 @@ def find_module( spec = importlib.util.find_spec(mod_main) if not spec: raise NoSource( - f"No module named {mod_main}; " + - f"{modulename!r} is a package and cannot be directly executed", + f"No module named {mod_main}; " + + f"{modulename!r} is a package and cannot be directly executed", ) pathname = spec.origin packagename = spec.name @@ -71,6 +73,7 @@ class PyRunner: This is meant to emulate real Python execution as closely as possible. 
""" + def __init__(self, args: list[str], as_module: bool = False) -> None: self.args = args self.as_module = as_module @@ -179,11 +182,11 @@ def run(self) -> None: main_mod.__file__ = main_mod.__file__[:-1] if self.package is not None: main_mod.__package__ = self.package - main_mod.__loader__ = self.loader # type: ignore[assignment] + main_mod.__loader__ = self.loader # type: ignore[assignment] if self.spec is not None: main_mod.__spec__ = self.spec - main_mod.__builtins__ = sys.modules["builtins"] # type: ignore[attr-defined] + main_mod.__builtins__ = sys.modules["builtins"] # type: ignore[attr-defined] sys.modules["__main__"] = main_mod @@ -208,7 +211,7 @@ def run(self) -> None: cwd = os.getcwd() try: exec(code, main_mod.__dict__) - except SystemExit: # pylint: disable=try-except-raise + except SystemExit: # pylint: disable=try-except-raise # The user called sys.exit(). Just pass it along to the upper # layers, where it will be handled. raise @@ -234,7 +237,7 @@ def run(self) -> None: assert err.__traceback__ is not None err.__traceback__ = err.__traceback__.tb_next sys.excepthook(typ, err, tb.tb_next) - except SystemExit: # pylint: disable=try-except-raise + except SystemExit: # pylint: disable=try-except-raise raise except Exception as exc: # Getting the output right in the case of excepthook @@ -313,11 +316,11 @@ def make_code_from_pyc(filename: str) -> CodeType: flags = struct.unpack(" None: """Set the directory that `relative_filename` will be relative to.""" global RELATIVE_DIR, CANONICAL_FILENAME_CACHE @@ -57,7 +58,7 @@ def relative_filename(filename: str) -> str: """ fnorm = os.path.normcase(filename) if fnorm.startswith(RELATIVE_DIR): - filename = filename[len(RELATIVE_DIR):] + filename = filename[len(RELATIVE_DIR) :] return filename @@ -72,7 +73,7 @@ def canonical_filename(filename: str) -> str: if not os.path.isabs(filename): for path in [os.curdir] + sys.path: if path is None: - continue # type: ignore[unreachable] + continue # type: ignore[unreachable] f = os.path.join(path, filename) try: exists = os.path.exists(f) @@ -110,7 +111,6 @@ def flat_rootname(filename: str) -> str: if env.WINDOWS: - _ACTUAL_PATH_CACHE: dict[str, str] = {} _ACTUAL_PATH_LIST_CACHE: dict[str, list[str]] = {} @@ -147,6 +147,7 @@ def actual_path(path: str) -> str: return actpath else: + def actual_path(path: str) -> str: """The actual path for non-Windows platforms.""" return path @@ -222,6 +223,7 @@ class TreeMatcher: somewhere in a subtree rooted at one of the directories. """ + def __init__(self, paths: Iterable[str], name: str = "unknown") -> None: self.original_paths: list[str] = human_sorted(paths) self.paths = [os.path.normcase(p) for p in paths] @@ -250,7 +252,8 @@ def match(self, fpath: str) -> bool: class ModuleMatcher: """A matcher for modules in a tree.""" - def __init__(self, module_names: Iterable[str], name:str = "unknown") -> None: + + def __init__(self, module_names: Iterable[str], name: str = "unknown") -> None: self.modules = list(module_names) self.name = name @@ -279,6 +282,7 @@ def match(self, module_name: str) -> bool: class GlobMatcher: """A matcher for files by file name pattern.""" + def __init__(self, pats: Iterable[str], name: str = "unknown") -> None: self.pats = list(pats) self.re = globs_to_regex(self.pats, case_insensitive=env.WINDOWS) @@ -326,6 +330,7 @@ def sep(s: str) -> str: ]] # fmt: on + def _glob_to_regex(pattern: str) -> str: """Convert a file-path glob pattern into a regex.""" # Turn all backslashes into slashes to simplify the tokenizer. 
@@ -335,7 +340,7 @@ def _glob_to_regex(pattern: str) -> str: path_rx = [] pos = 0 while pos < len(pattern): - for rx, sub in G2RX_TOKENS: # pragma: always breaks + for rx, sub in G2RX_TOKENS: # pragma: always breaks if m := rx.match(pattern, pos=pos): if sub is None: raise ConfigError(f"File pattern can't include {m[0]!r}") @@ -371,7 +376,7 @@ def globs_to_regex( flags |= re.IGNORECASE rx = join_regex(map(_glob_to_regex, patterns)) if not partial: - rx = fr"(?:{rx})\Z" + rx = rf"(?:{rx})\Z" compiled = re.compile(rx, flags=flags) return compiled @@ -387,6 +392,7 @@ class PathAliases: map a path through those aliases to produce a unified path. """ + def __init__( self, debugfn: Callable[[str], None] | None = None, @@ -443,7 +449,7 @@ def add(self, pattern: str, result: str) -> None: result = result.rstrip(r"\/") + result_sep self.aliases.append((original_pattern, regex, result)) - def map(self, path: str, exists:Callable[[str], bool] = source_exists) -> str: + def map(self, path: str, exists: Callable[[str], bool] = source_exists) -> str: """Map `path` through the aliases. `path` is checked against all of the patterns. The first pattern to @@ -477,13 +483,13 @@ def map(self, path: str, exists:Callable[[str], bool] = source_exists) -> str: new = new[2:] if not exists(new): self.debugfn( - f"Rule {original_pattern!r} changed {path!r} to {new!r} " + - "which doesn't exist, continuing", + f"Rule {original_pattern!r} changed {path!r} to {new!r} " + + "which doesn't exist, continuing", ) continue self.debugfn( - f"Matched path {path!r} to rule {original_pattern!r} -> {result!r}, " + - f"producing {new!r}", + f"Matched path {path!r} to rule {original_pattern!r} -> {result!r}, " + + f"producing {new!r}", ) return new @@ -498,7 +504,7 @@ def map(self, path: str, exists:Callable[[str], bool] = source_exists) -> str: if len(parts) > 1: dir1 = parts[0] pattern = f"*/{dir1}" - regex_pat = fr"^(.*[\\/])?{re.escape(dir1)}[\\/]" + regex_pat = rf"^(.*[\\/])?{re.escape(dir1)}[\\/]" result = f"{dir1}{os.sep}" # Only add a new pattern if we don't already have this pattern. if not any(p == pattern for p, _, _ in self.aliases): diff --git a/coverage/html.py b/coverage/html.py index e35cabbea..bacd572d1 100644 --- a/coverage/html.py +++ b/coverage/html.py @@ -46,8 +46,7 @@ def data_filename(fname: str) -> str: - """Return the path to an "htmlfiles" data file of ours. - """ + """Return the path to an "htmlfiles" data file of ours.""" static_dir = os.path.join(os.path.dirname(__file__), "htmlfiles") static_filename = os.path.join(static_dir, fname) return static_filename @@ -69,6 +68,7 @@ def write_html(fname: str, html: str) -> None: @dataclass class LineData: """The data for each source line of HTML output.""" + tokens: list[tuple[str, str]] number: TLineNo category: str @@ -87,6 +87,7 @@ class LineData: @dataclass class FileData: """The data for each source file of HTML output.""" + relative_filename: str nums: Numbers lines: list[LineData] @@ -95,6 +96,7 @@ class FileData: @dataclass class IndexItem: """Information for each index entry, to render an index page.""" + url: str = "" file: str = "" description: str = "" @@ -104,6 +106,7 @@ class IndexItem: @dataclass class IndexPage: """Data for each index page.""" + noun: str plural: str filename: str @@ -186,7 +189,7 @@ def data_for_file(self, fr: FileReporter, analysis: Analysis) -> FileData: # I don't understand why this last condition is marked as # partial. If I add an else with an exception, the exception # is raised. 
- elif first_line in analysis.statements: # pragma: part covered + elif first_line in analysis.statements: # pragma: part covered category2 = "run2" contexts = [] @@ -200,16 +203,18 @@ def data_for_file(self, fr: FileReporter, analysis: Analysis) -> FileData: contexts_label = f"{len(contexts)} ctx" context_list = contexts - lines.append(LineData( - tokens=tokens, - number=lineno, - category=category or category2, - contexts=contexts, - contexts_label=contexts_label, - context_list=context_list, - short_annotations=short_annotations, - long_annotations=long_annotations, - )) + lines.append( + LineData( + tokens=tokens, + number=lineno, + category=category or category2, + contexts=contexts, + contexts_label=contexts_label, + context_list=context_list, + short_annotations=short_annotations, + long_annotations=long_annotations, + ) + ) file_data = FileData( relative_filename=fr.relative_filename(), @@ -222,6 +227,7 @@ def data_for_file(self, fr: FileReporter, analysis: Analysis) -> FileData: class FileToReport: """A file we're considering reporting.""" + def __init__(self, fr: FileReporter, analysis: Analysis) -> None: self.fr = fr self.analysis = analysis @@ -232,6 +238,7 @@ def __init__(self, fr: FileReporter, analysis: Analysis) -> None: HTML_SAFE = string.ascii_letters + string.digits + "!#$%'()*+,-./:;=?@[]^_`{|}~" + @functools.cache def encode_int(n: int) -> str: """Create a short HTML-safe string from an integer, using HTML_SAFE.""" @@ -307,7 +314,6 @@ def __init__(self, cov: Coverage) -> None: "escape": escape, "pair": pair, "len": len, - # Constants for this report. "__url__": __url__, "__version__": coverage.__version__, @@ -317,7 +323,6 @@ def __init__(self, cov: Coverage) -> None: "has_arcs": self.has_arcs, "show_contexts": self.config.show_contexts, "statics": {}, - # Constants for all reports. # These css classes determine which lines are highlighted by default. "category": { @@ -417,7 +422,7 @@ def copy_static_file(self, src: str, slug: str = "") -> None: dest = copy_with_cache_bust(src, self.directory) if not slug: slug = os.path.basename(src).replace(".", "_") - self.template_globals["statics"][slug] = dest # type: ignore + self.template_globals["statics"][slug] = dest # type: ignore def make_local_static_report_files(self) -> None: """Make local instances of static files for HTML report.""" @@ -501,9 +506,8 @@ def write_html_page(self, ftr: FileToReport) -> None: encode_int(context_codes[c_context]) for c_context in ldata.context_list ] code_width = max(len(ec) for ec in encoded_contexts) - ldata.context_str = ( - str(code_width) - + "".join(ec.ljust(code_width) for ec in encoded_contexts) + ldata.context_str = str(code_width) + "".join( + ec.ljust(code_width) for ec in encoded_contexts ) else: ldata.context_str = "" @@ -512,8 +516,7 @@ def write_html_page(self, ftr: FileToReport) -> None: # 202F is NARROW NO-BREAK SPACE. # 219B is RIGHTWARDS ARROW WITH STROKE. 
ldata.annotate = ",   ".join( - f"{ldata.number} ↛ {d}" - for d in ldata.short_annotations + f"{ldata.number} ↛ {d}" for d in ldata.short_annotations ) else: ldata.annotate = None @@ -532,24 +535,26 @@ def write_html_page(self, ftr: FileToReport) -> None: css_classes = [] if ldata.category: css_classes.append( - self.template_globals["category"][ldata.category], # type: ignore[index] + self.template_globals["category"][ldata.category], # type: ignore[index] ) ldata.css_class = " ".join(css_classes) or "pln" html_path = os.path.join(self.directory, ftr.html_filename) - html = self.source_tmpl.render({ - **file_data.__dict__, - "contexts_json": contexts_json, - "prev_html": ftr.prev_html, - "next_html": ftr.next_html, - }) + html = self.source_tmpl.render( + { + **file_data.__dict__, + "contexts_json": contexts_json, + "prev_html": ftr.prev_html, + "next_html": ftr.next_html, + } + ) write_html(html_path, html) # Save this file's information for the index page. index_info = IndexItem( - url = ftr.html_filename, - file = escape(ftr.fr.relative_filename()), - nums = ftr.analysis.numbers, + url=ftr.html_filename, + file=escape(ftr.fr.relative_filename()), + nums=ftr.analysis.numbers, ) self.index_pages["file"].summaries.append(index_info) self.incr.set_index_info(ftr.rootname, index_info) @@ -587,29 +592,33 @@ def write_region_index_pages(self, files_to_report: Iterable[FileToReport]) -> N if not self.should_report(analysis, page_data): continue sorting_name = region.name.rpartition(".")[-1].lstrip("_") - page_data.summaries.append(IndexItem( - url=f"{ftr.html_filename}#t{region.start}", - file=escape(ftr.fr.relative_filename()), - description=( - f"" - + escape(region.name) - + "" - ), - nums=analysis.numbers, - )) + page_data.summaries.append( + IndexItem( + url=f"{ftr.html_filename}#t{region.start}", + file=escape(ftr.fr.relative_filename()), + description=( + f"" + + escape(region.name) + + "" + ), + nums=analysis.numbers, + ) + ) analysis = ftr.analysis.narrow(outside_lines) if self.should_report(analysis, page_data): - page_data.summaries.append(IndexItem( - url=ftr.html_filename, - file=escape(ftr.fr.relative_filename()), - description=( - "" - + f"(no {escape(noun)})" - + "" - ), - nums=analysis.numbers, - )) + page_data.summaries.append( + IndexItem( + url=ftr.html_filename, + file=escape(ftr.fr.relative_filename()), + description=( + "" + + f"(no {escape(noun)})" + + "" + ), + nums=analysis.numbers, + ) + ) for noun, index_page in self.index_pages.items(): if noun != "file": @@ -659,6 +668,7 @@ def write_index_page(self, index_page: IndexPage, **kwargs: str) -> str: @dataclass class FileInfo: """Summary of the information from last rendering, to avoid duplicate work.""" + hash: str = "" index: IndexItem = field(default_factory=IndexItem) @@ -765,10 +775,7 @@ def write(self) -> None: "format": self.STATUS_FORMAT, "version": coverage.__version__, "globals": self.globals, - "files": { - fname: dataclasses.asdict(finfo) - for fname, finfo in self.files.items() - }, + "files": {fname: dataclasses.asdict(finfo) for fname, finfo in self.files.items()}, } with open(status_file, "w", encoding="utf-8") as fout: json.dump(status_data, fout, separators=(",", ":")) @@ -825,6 +832,7 @@ def set_index_info(self, fname: str, info: IndexItem) -> None: # Helpers for templates and generating HTML + def escape(t: str) -> str: """HTML-escape the text in `t`. 
diff --git a/coverage/inorout.py b/coverage/inorout.py index a19a0ef24..db598fde6 100644 --- a/coverage/inorout.py +++ b/coverage/inorout.py @@ -40,7 +40,13 @@ modules_we_happen_to_have: list[ModuleType] = [ - inspect, itertools, os, platform, re, sysconfig, traceback, + inspect, + itertools, + os, + platform, + re, + sysconfig, + traceback, ] if env.PYPY: @@ -50,11 +56,13 @@ # find those directories. import _pypy_irc_topic # pylint: disable=import-error import _structseq # pylint: disable=import-error + modules_we_happen_to_have.extend([_structseq, _pypy_irc_topic]) os = isolate_module(os) + def canonical_path(morf: TMorf, directory: bool = False) -> str: """Return the canonical path of the module or file `morf`. @@ -492,7 +500,8 @@ def warn_already_imported_files(self) -> None: elif self.debug and self.debug.should("trace"): self.debug.write( "Didn't trace already imported file {!r}: {}".format( - disp.original_filename, disp.reason, + disp.original_filename, + disp.reason, ), ) @@ -533,8 +542,7 @@ def find_possibly_unexecuted_files(self) -> Iterable[tuple[str, str | None]]: Yields pairs: file path, and responsible plug-in name. """ for pkg in self.source_pkgs: - if (pkg not in sys.modules or - not module_has_file(sys.modules[pkg])): + if pkg not in sys.modules or not module_has_file(sys.modules[pkg]): continue pkg_file = source_for_file(cast(str, sys.modules[pkg].__file__)) yield from self._find_executable_files(canonical_path(pkg_file)) @@ -559,8 +567,8 @@ def _find_executable_files(self, src_dir: str) -> Iterable[tuple[str, str | None """ py_files = ( - (py_file, None) for py_file in - find_python_files(src_dir, self.include_namespace_packages) + (py_file, None) + for py_file in find_python_files(src_dir, self.include_namespace_packages) ) plugin_files = self._find_plugin_files(src_dir) @@ -585,9 +593,14 @@ def sys_info(self) -> Iterable[tuple[str, Any]]: ] matcher_names = [ - "source_match", "source_pkgs_match", - "include_match", "omit_match", - "cover_match", "pylib_match", "third_match", "source_in_third_match", + "source_match", + "source_pkgs_match", + "include_match", + "omit_match", + "cover_match", + "pylib_match", + "third_match", + "source_in_third_match", ] for matcher_name in matcher_names: diff --git a/coverage/jsonreport.py b/coverage/jsonreport.py index e2f1f98cb..85d7a973c 100644 --- a/coverage/jsonreport.py +++ b/coverage/jsonreport.py @@ -30,6 +30,7 @@ # 3: add region information (functions, classes) FORMAT_VERSION = 3 + class JsonReporter: """A reporter for writing JSON coverage results.""" diff --git a/coverage/lcovreport.py b/coverage/lcovreport.py index e71388784..247c892a0 100644 --- a/coverage/lcovreport.py +++ b/coverage/lcovreport.py @@ -42,7 +42,7 @@ def lcov_lines( hash_suffix = "" for line in lines: if source_lines: - hash_suffix = "," + line_hash(source_lines[line-1]) + hash_suffix = "," + line_hash(source_lines[line - 1]) # Q: can we get info about the number of times a statement is # executed? If so, that should be recorded here. 
hit = int(line not in analysis.missing) @@ -70,9 +70,11 @@ def lcov_functions( # suppressions because of https://github.com/pylint-dev/pylint/issues/9923 functions = [ - (min(region.start, min(region.lines)), #pylint: disable=nested-min-max - max(region.start, max(region.lines)), #pylint: disable=nested-min-max - region) + ( + min(region.start, min(region.lines)), # pylint: disable=nested-min-max + max(region.start, max(region.lines)), # pylint: disable=nested-min-max + region, + ) for region in fr.code_regions() if region.kind == "function" and region.lines ] @@ -119,19 +121,13 @@ def lcov_arcs( # When _none_ of the out arcs from 'line' were executed, # it can mean the line always raised an exception. assert len(executed_arcs[line]) == 0 - destinations = [ - (dst, "-") for dst in missing_arcs[line] - ] + destinations = [(dst, "-") for dst in missing_arcs[line]] else: # Q: can we get counts of the number of times each arc was executed? # branch_stats has "total" and "taken" counts for each branch, # but it doesn't have "taken" broken down by destination. - destinations = [ - (dst, "1") for dst in executed_arcs[line] - ] - destinations.extend( - (dst, "0") for dst in missing_arcs[line] - ) + destinations = [(dst, "1") for dst in executed_arcs[line]] + destinations.extend((dst, "0") for dst in missing_arcs[line]) # Sort exit arcs after normal arcs. Exit arcs typically come from # an if statement, at the end of a function, with no else clause. diff --git a/coverage/misc.py b/coverage/misc.py index 29be16e19..399bf1ba0 100644 --- a/coverage/misc.py +++ b/coverage/misc.py @@ -50,11 +50,13 @@ def isolate_module(mod: ModuleType) -> ModuleType: setattr(new_mod, name, value) return ISOLATED_MODULES[mod] + os = isolate_module(os) class SysModuleSaver: """Saves the contents of sys.modules, and removes new modules later.""" + def __init__(self) -> None: self.old_modules = set(sys.modules) @@ -154,6 +156,7 @@ def ensure_dir_for_file(path: str) -> None: class Hasher: """Hashes Python data for fingerprinting.""" + def __init__(self) -> None: self.hash = hashlib.new("sha3_256", usedforsecurity=False) @@ -214,6 +217,7 @@ class DefaultValue: and Sphinx output. """ + def __init__(self, display_as: str) -> None: self.display_as = display_as @@ -257,7 +261,7 @@ def substitute_variables(text: str, variables: Mapping[str, str]) -> str: def dollar_replace(match: re.Match[str]) -> str: """Called for each $replacement.""" # Only one of the dollar_groups will have matched, just get its text. - word = next(g for g in match.group(*dollar_groups) if g) # pragma: always breaks + word = next(g for g in match.group(*dollar_groups) if g) # pragma: always breaks if word == "$": return "$" elif word in variables: @@ -273,8 +277,7 @@ def dollar_replace(match: re.Match[str]) -> str: def format_local_datetime(dt: datetime.datetime) -> str: - """Return a string with local timezone representing the date. - """ + """Return a string with local timezone representing the date.""" return dt.astimezone().strftime("%Y-%m-%d %H:%M %z") @@ -307,6 +310,7 @@ def _human_key(s: str) -> tuple[list[str | int], str]: The original string is appended as a last value to ensure the key is unique enough so that "x1y" and "x001y" can be distinguished. 
""" + def tryint(s: str) -> str | int: """If `s` is a number, return an int, else `s` unchanged.""" try: @@ -316,6 +320,7 @@ def tryint(s: str) -> str | int: return ([tryint(c) for c in re.split(r"(\d+)", s)], s) + def human_sorted(strings: Iterable[str]) -> list[str]: """Sort the given iterable of strings the way that humans expect. @@ -326,8 +331,10 @@ def human_sorted(strings: Iterable[str]) -> list[str]: """ return sorted(strings, key=_human_key) + SortableItem = TypeVar("SortableItem", bound=Sequence[Any]) + def human_sorted_items( items: Iterable[SortableItem], reverse: bool = False, diff --git a/coverage/multiproc.py b/coverage/multiproc.py index 964dee897..1c2d2f7d7 100644 --- a/coverage/multiproc.py +++ b/coverage/multiproc.py @@ -21,16 +21,18 @@ OriginalProcess = multiprocessing.process.BaseProcess -original_bootstrap = OriginalProcess._bootstrap # type: ignore[attr-defined] +original_bootstrap = OriginalProcess._bootstrap # type: ignore[attr-defined] -class ProcessWithCoverage(OriginalProcess): # pylint: disable=abstract-method + +class ProcessWithCoverage(OriginalProcess): # pylint: disable=abstract-method """A replacement for multiprocess.Process that starts coverage.""" - def _bootstrap(self, *args, **kwargs): # type: ignore[no-untyped-def] + def _bootstrap(self, *args, **kwargs): # type: ignore[no-untyped-def] """Wrapper around _bootstrap to start coverage.""" debug: DebugControl | None = None try: from coverage import Coverage # avoid circular import + cov = Coverage(data_suffix=True, auto_data=True) cov._warn_preimported_source = False cov.start() @@ -60,8 +62,10 @@ def _bootstrap(self, *args, **kwargs): # type: ignore[no-untyped-def] if debug: debug.write("Saved multiprocessing data") + class Stowaway: """An object to pickle, so when it is unpickled, it can apply the monkey-patch.""" + def __init__(self, rcfile: str) -> None: self.rcfile = rcfile @@ -85,7 +89,7 @@ def patch_multiprocessing(rcfile: str) -> None: if hasattr(multiprocessing, PATCHED_MARKER): return - OriginalProcess._bootstrap = ProcessWithCoverage._bootstrap # type: ignore[attr-defined] + OriginalProcess._bootstrap = ProcessWithCoverage._bootstrap # type: ignore[attr-defined] # Set the value in ProcessWithCoverage that will be pickled into the child # process. @@ -99,10 +103,12 @@ def patch_multiprocessing(rcfile: str) -> None: # Windows only spawns, so this is needed to keep Windows working. try: from multiprocessing import spawn + original_get_preparation_data = spawn.get_preparation_data except (ImportError, AttributeError): pass else: + def get_preparation_data_with_stowaway(name: str) -> dict[str, Any]: """Get the original preparation data, and also insert our stowaway.""" d = original_get_preparation_data(name) diff --git a/coverage/numbits.py b/coverage/numbits.py index fd202e089..2b517d0a5 100644 --- a/coverage/numbits.py +++ b/coverage/numbits.py @@ -38,7 +38,7 @@ def nums_to_numbits(nums: Iterable[int]) -> bytes: return b"" b = bytearray(nbytes) for num in nums: - b[num//8] |= 1 << num % 8 + b[num // 8] |= 1 << num % 8 return bytes(b) @@ -58,7 +58,7 @@ def numbits_to_nums(numbits: bytes) -> list[int]: nums = [] for byte_i, byte in enumerate(numbits): for bit_i in range(8): - if (byte & (1 << bit_i)): + if byte & (1 << bit_i): nums.append(byte_i * 8 + bit_i) return nums diff --git a/coverage/parser.py b/coverage/parser.py index 764fcbccd..8b3609cea 100644 --- a/coverage/parser.py +++ b/coverage/parser.py @@ -36,6 +36,7 @@ class PythonParser: involved. 
""" + def __init__( self, text: str | None = None, @@ -54,6 +55,7 @@ def __init__( self.text: str = text else: from coverage.python import get_python_source + try: self.text = get_python_source(self.filename) except OSError as err: @@ -146,20 +148,23 @@ def _raw_parse(self) -> None: assert self.text is not None tokgen = generate_tokens(self.text) for toktype, ttext, (slineno, _), (elineno, _), ltext in tokgen: - if self.show_tokens: # pragma: debugging - print("%10s %5s %-20r %r" % ( - tokenize.tok_name.get(toktype, toktype), - nice_pair((slineno, elineno)), ttext, ltext, - )) + if self.show_tokens: # pragma: debugging + print( + "%10s %5s %-20r %r" + % ( + tokenize.tok_name.get(toktype, toktype), + nice_pair((slineno, elineno)), + ttext, + ltext, + ) + ) if toktype == token.INDENT: indent += 1 elif toktype == token.DEDENT: indent -= 1 elif toktype == token.OP: if ttext == ":" and nesting == 0: - should_exclude = ( - self.excluded.intersection(range(first_line, elineno + 1)) - ) + should_exclude = self.excluded.intersection(range(first_line, elineno + 1)) if not excluding and should_exclude: # Start excluding a suite. We trigger off of the colon # token so that the #pragma comment will be recognized on @@ -176,7 +181,7 @@ def _raw_parse(self) -> None: # We're at the end of a line, and we've ended on a # different line than the first line of the statement, # so record a multi-line range. - for l in range(first_line, elineno+1): + for l in range(first_line, elineno + 1): self._multiline[l] = first_line first_line = 0 @@ -260,12 +265,12 @@ def parse_source(self) -> None: self._raw_parse() except (tokenize.TokenError, IndentationError, SyntaxError) as err: if hasattr(err, "lineno"): - lineno = err.lineno # IndentationError + lineno = err.lineno # IndentationError else: - lineno = err.args[1][0] # TokenError + lineno = err.args[1][0] # TokenError raise NotPython( - f"Couldn't parse '{self.filename}' as Python source: " + - f"{err.args[0]!r} at line {lineno}", + f"Couldn't parse '{self.filename}' as Python source: " + + f"{err.args[0]!r} at line {lineno}", ) from err ignore = self.excluded | self.raw_docstrings @@ -490,6 +495,7 @@ def _find_statements(self) -> Iterable[TLineNo]: # AST analysis # + @dataclass(frozen=True, order=True) class ArcStart: """The information needed to start an arc. @@ -520,12 +526,14 @@ class ArcStart: "line 1 didn't jump to line 2 because the condition on line 1 was never true." """ + lineno: TLineNo cause: str = "" class TAddArcFn(Protocol): """The type for AstArcAnalyzer.add_arc().""" + def __call__( self, start: TLineNo, @@ -548,6 +556,7 @@ def __call__( TArcFragments = dict[TArc, list[tuple[Optional[str], Optional[str]]]] + class Block: """ Blocks need to handle various exiting statements in their own ways. @@ -557,6 +566,7 @@ class Block: exits are handled, or False if the search should continue up the block stack. """ + # pylint: disable=unused-argument def process_break_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: """Process break exits.""" @@ -577,6 +587,7 @@ def process_return_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool class LoopBlock(Block): """A block on the block stack representing a `for` or `while` loop.""" + def __init__(self, start: TLineNo) -> None: # The line number where the loop starts. 
self.start = start @@ -595,6 +606,7 @@ def process_continue_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bo class FunctionBlock(Block): """A block on the block stack representing a function definition.""" + def __init__(self, start: TLineNo, name: str) -> None: # The line number where the function starts. self.start = start @@ -604,7 +616,9 @@ def __init__(self, start: TLineNo, name: str) -> None: def process_raise_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: for xit in exits: add_arc( - xit.lineno, -self.start, xit.cause, + xit.lineno, + -self.start, + xit.cause, f"except from function {self.name!r}", ) return True @@ -612,7 +626,9 @@ def process_raise_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: def process_return_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: for xit in exits: add_arc( - xit.lineno, -self.start, xit.cause, + xit.lineno, + -self.start, + xit.cause, f"return from function {self.name!r}", ) return True @@ -620,6 +636,7 @@ def process_return_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool class TryBlock(Block): """A block on the block stack representing a `try` block.""" + def __init__(self, handler_start: TLineNo | None, final_start: TLineNo | None) -> None: # The line number of the first "except" handler, if any. self.handler_start = handler_start @@ -640,9 +657,11 @@ class NodeList(ast.AST): unconditional execution of one of the clauses. """ + def __init__(self, body: Sequence[ast.AST]) -> None: self.body = body - self.lineno = body[0].lineno # type: ignore[attr-defined] + self.lineno = body[0].lineno # type: ignore[attr-defined] + # TODO: Shouldn't the cause messages join with "and" instead of "or"? @@ -708,7 +727,7 @@ def __init__( # $set_env.py: COVERAGE_AST_DUMP - Dump the AST nodes when parsing code. dump_ast = bool(int(os.getenv("COVERAGE_AST_DUMP", "0"))) - if dump_ast: # pragma: debugging + if dump_ast: # pragma: debugging # Dump the AST so that failing tests have helpful output. print(f"Statements: {self.statements}") print(f"Multiline map: {self.multiline}") @@ -802,7 +821,7 @@ def add_arc( action_msg: str | None = None, ) -> None: """Add an arc, including message fragments to use if it is missing.""" - if self.debug: # pragma: debugging + if self.debug: # pragma: debugging print(f"Adding possible arc: ({start}, {end}): {missing_cause_msg!r}, {action_msg!r}") print(short_stack(), end="\n\n") self.arcs.add((start, end)) @@ -830,7 +849,7 @@ def line_for_node(self, node: ast.AST) -> TLineNo: if handler is not None: line = handler(node) else: - line = node.lineno # type: ignore[attr-defined] + line = node.lineno # type: ignore[attr-defined] return self.multiline.get(line, line) # First lines: _line__* @@ -882,8 +901,17 @@ def _line__Module(self, node: ast.Module) -> TLineNo: # The node types that just flow to the next node with no complications. OK_TO_DEFAULT = { - "AnnAssign", "Assign", "Assert", "AugAssign", "Delete", "Expr", "Global", - "Import", "ImportFrom", "Nonlocal", "Pass", + "AnnAssign", + "Assign", + "Assert", + "AugAssign", + "Delete", + "Expr", + "Global", + "Import", + "ImportFrom", + "Nonlocal", + "Pass", } def node_exits(self, node: ast.AST) -> set[ArcStart]: @@ -915,7 +943,7 @@ def node_exits(self, node: ast.AST) -> set[ArcStart]: # statement), or it's something we overlooked. 
if env.TESTING: if node_name not in self.OK_TO_DEFAULT: - raise RuntimeError(f"*** Unhandled: {node}") # pragma: only failure + raise RuntimeError(f"*** Unhandled: {node}") # pragma: only failure # Default for simple statements: one exit from this node. arc_starts = {ArcStart(self.line_for_node(node))} @@ -1029,10 +1057,10 @@ def _missing__While(self, node: ast.While) -> ast.AST | None: if not body_nodes: return None # Make a synthetic While-true node. - new_while = ast.While() # type: ignore[call-arg] - new_while.lineno = body_nodes.lineno # type: ignore[attr-defined] - new_while.test = ast.Name() # type: ignore[call-arg] - new_while.test.lineno = body_nodes.lineno # type: ignore[attr-defined] + new_while = ast.While() # type: ignore[call-arg] + new_while.lineno = body_nodes.lineno # type: ignore[attr-defined] + new_while.test = ast.Name() # type: ignore[call-arg] + new_while.test.lineno = body_nodes.lineno # type: ignore[attr-defined] new_while.test.id = "True" assert hasattr(body_nodes, "body") new_while.body = body_nodes.body @@ -1056,13 +1084,13 @@ def _missing__While(self, node: ast.While) -> ast.AST | None: def process_break_exits(self, exits: set[ArcStart]) -> None: """Add arcs due to jumps from `exits` being breaks.""" - for block in self.nearest_blocks(): # pragma: always breaks + for block in self.nearest_blocks(): # pragma: always breaks if block.process_break_exits(exits, self.add_arc): break def process_continue_exits(self, exits: set[ArcStart]) -> None: """Add arcs due to jumps from `exits` being continues.""" - for block in self.nearest_blocks(): # pragma: always breaks + for block in self.nearest_blocks(): # pragma: always breaks if block.process_continue_exits(exits, self.add_arc): break @@ -1074,7 +1102,7 @@ def process_raise_exits(self, exits: set[ArcStart]) -> None: def process_return_exits(self, exits: set[ArcStart]) -> None: """Add arcs due to jumps from `exits` being returns.""" - for block in self.nearest_blocks(): # pragma: always breaks + for block in self.nearest_blocks(): # pragma: always breaks if block.process_return_exits(exits, self.add_arc): break @@ -1165,6 +1193,7 @@ def _handle__If(self, node: ast.If) -> set[ArcStart]: return exits if sys.version_info >= (3, 10): + def _handle__Match(self, node: ast.Match) -> set[ArcStart]: start = self.line_for_node(node) last_start = start @@ -1180,15 +1209,13 @@ def _handle__Match(self, node: ast.Match) -> set[ArcStart]: last_start = case_start # case is now the last case, check for wildcard match. 
- pattern = case.pattern # pylint: disable=undefined-loop-variable + pattern = case.pattern # pylint: disable=undefined-loop-variable while isinstance(pattern, ast.MatchOr): pattern = pattern.patterns[-1] while isinstance(pattern, ast.MatchAs) and pattern.pattern is not None: pattern = pattern.pattern had_wildcard = ( - isinstance(pattern, ast.MatchAs) - and pattern.pattern is None - and case.guard is None # pylint: disable=undefined-loop-variable + isinstance(pattern, ast.MatchAs) and pattern.pattern is None and case.guard is None # pylint: disable=undefined-loop-variable ) if not had_wildcard: diff --git a/coverage/patch.py b/coverage/patch.py index c6f55df67..141b4bb72 100644 --- a/coverage/patch.py +++ b/coverage/patch.py @@ -107,6 +107,7 @@ def coverage_execv_patch(*args: Any, **kwargs: Any) -> Any: def _patch_fork(debug: TDebugCtl) -> None: """Ensure Coverage is properly reset after a fork.""" from coverage.control import _after_fork_in_child + if env.WINDOWS: raise CoverageException("patch=fork isn't supported yet on Windows.") @@ -120,10 +121,12 @@ def _patch_subprocess(config: CoverageConfig, debug: TDebugCtl, make_pth_file: b if make_pth_file: pth_files = create_pth_files(debug) + def delete_pth_files() -> None: for p in pth_files: debug.write(f"Deleting subprocess .pth file: {str(p)!r}") p.unlink(missing_ok=True) + atexit.register(delete_pth_files) assert config.config_file is not None os.environ["COVERAGE_PROCESS_CONFIG"] = config.serialize() @@ -145,6 +148,7 @@ def delete_pth_files() -> None: PTH_TEXT = f"import sys; exec({PTH_CODE!r})\n" + def create_pth_files(debug: TDebugCtl = NoDebugging()) -> list[Path]: """Create .pth files for measuring subprocesses.""" pth_files = [] diff --git a/coverage/phystokens.py b/coverage/phystokens.py index 200c237e7..17d128da6 100644 --- a/coverage/phystokens.py +++ b/coverage/phystokens.py @@ -55,8 +55,10 @@ def _phys_tokens(toks: TokenInfos) -> TokenInfos: if last_ttext.endswith("\\"): inject_backslash = False elif ttype == token.STRING: - if (last_line.endswith("\\\n") and # pylint: disable=simplifiable-if-statement - last_line.rstrip(" \\\n").endswith(last_ttext)): + if ( + last_line.endswith("\\\n") # pylint: disable=simplifiable-if-statement + and last_line.rstrip(" \\\n").endswith(last_ttext) + ): # Deal with special cases like such code:: # # a = ["aaa",\ # there may be zero or more blanks between "," and "\". @@ -75,8 +77,10 @@ def _phys_tokens(toks: TokenInfos) -> TokenInfos: ccol = len(last_line.split("\n")[-2]) - 1 # Yield the token, with a fake token type. yield tokenize.TokenInfo( - 99999, "\\\n", - (slineno, ccol), (slineno, ccol+2), + 99999, + "\\\n", + (slineno, ccol), + (slineno, ccol + 2), last_line, ) last_line = ltext diff --git a/coverage/plugin.py b/coverage/plugin.py index 25d1ffed7..72c3cd9ea 100644 --- a/coverage/plugin.py +++ b/coverage/plugin.py @@ -131,7 +131,7 @@ class CoveragePlugin: _coverage_plugin_name: str _coverage_enabled: bool - def file_tracer(self, filename: str) -> FileTracer | None: # pylint: disable=unused-argument + def file_tracer(self, filename: str) -> FileTracer | None: # pylint: disable=unused-argument """Get a :class:`FileTracer` object for a file. Plug-in type: file tracer. 
@@ -173,8 +173,8 @@ def file_tracer(self, filename: str) -> FileTracer | None: # pylint: disable=unu def file_reporter( self, - filename: str, # pylint: disable=unused-argument - ) -> FileReporter | str: # str should be Literal["python"] + filename: str, # pylint: disable=unused-argument + ) -> FileReporter | str: # str should be Literal["python"] """Get the :class:`FileReporter` class to use for a file. Plug-in type: file tracer. @@ -190,7 +190,7 @@ def file_reporter( def dynamic_context( self, - frame: FrameType, # pylint: disable=unused-argument + frame: FrameType, # pylint: disable=unused-argument ) -> str | None: """Get the dynamically computed context label for `frame`. @@ -209,7 +209,7 @@ def dynamic_context( def find_executable_files( self, - src_dir: str, # pylint: disable=unused-argument + src_dir: str, # pylint: disable=unused-argument ) -> Iterable[str]: """Yield all of the executable files in `src_dir`, recursively. @@ -255,6 +255,7 @@ def sys_info(self) -> Iterable[tuple[str, Any]]: class CoveragePluginBase: """Plugins produce specialized objects, which point back to the original plugin.""" + _coverage_plugin: CoveragePlugin @@ -310,8 +311,8 @@ def has_dynamic_source_filename(self) -> bool: def dynamic_source_filename( self, - filename: str, # pylint: disable=unused-argument - frame: FrameType, # pylint: disable=unused-argument + filename: str, # pylint: disable=unused-argument + frame: FrameType, # pylint: disable=unused-argument ) -> str | None: """Get a dynamically computed source file name. @@ -526,7 +527,7 @@ def missing_arc_description( self, start: TLineNo, end: TLineNo, - executed_arcs: Iterable[TArc] | None = None, # pylint: disable=unused-argument + executed_arcs: Iterable[TArc] | None = None, # pylint: disable=unused-argument ) -> str: """Provide an English sentence describing a missing arc. @@ -544,8 +545,8 @@ def missing_arc_description( def arc_description( self, - start: TLineNo, # pylint: disable=unused-argument - end: TLineNo + start: TLineNo, # pylint: disable=unused-argument + end: TLineNo, ) -> str: """Provide an English description of an arc's effect.""" return f"jump to line {end}" @@ -613,4 +614,4 @@ def __lt__(self, other: Any) -> bool: return isinstance(other, FileReporter) and self.filename < other.filename # This object doesn't need to be hashed. 
- __hash__ = None # type: ignore[assignment] + __hash__ = None # type: ignore[assignment] diff --git a/coverage/plugin_support.py b/coverage/plugin_support.py index 3e2f15dee..b5ca302d0 100644 --- a/coverage/plugin_support.py +++ b/coverage/plugin_support.py @@ -151,7 +151,7 @@ def add_label(self, label: str) -> LabelledDebug: def message_prefix(self) -> str: """The prefix to use on messages, combining the labels.""" prefixes = self.labels + [""] - return ":\n".join(" "*i+label for i, label in enumerate(prefixes)) + return ":\n".join(" " * i + label for i, label in enumerate(prefixes)) def write(self, message: str) -> None: """Write `message`, but with the labels prepended.""" @@ -227,9 +227,13 @@ def has_dynamic_source_filename(self) -> bool: def dynamic_source_filename(self, filename: str, frame: FrameType) -> str | None: dyn = self.tracer.dynamic_source_filename(filename, frame) - self.debug.write("dynamic_source_filename({!r}, {}) --> {!r}".format( - filename, self._show_frame(frame), dyn, - )) + self.debug.write( + "dynamic_source_filename({!r}, {}) --> {!r}".format( + filename, + self._show_frame(frame), + dyn, + ) + ) return dyn def line_number_range(self, frame: FrameType) -> tuple[TLineNo, TLineNo]: diff --git a/coverage/python.py b/coverage/python.py index ec5866b66..cbc23e25f 100644 --- a/coverage/python.py +++ b/coverage/python.py @@ -210,10 +210,7 @@ def translate_arcs(self, arcs: Iterable[TArc]) -> set[TArc]: def no_branch_lines(self) -> set[TLineNo]: assert self.coverage is not None no_branch = self.parser.lines_matching( - join_regex( - self.coverage.config.partial_list - + self.coverage.config.partial_always_list - ) + join_regex(self.coverage.config.partial_list + self.coverage.config.partial_always_list) ) return no_branch @@ -231,11 +228,7 @@ def missing_arc_description( ) -> str: return self.parser.missing_arc_description(start, end) - def arc_description( - self, - start: TLineNo, - end: TLineNo - ) -> str: + def arc_description(self, start: TLineNo, end: TLineNo) -> str: return self.parser.arc_description(start, end) def source(self) -> str: diff --git a/coverage/pytracer.py b/coverage/pytracer.py index c08a9b2d8..ea178add5 100644 --- a/coverage/pytracer.py +++ b/coverage/pytracer.py @@ -51,6 +51,7 @@ THIS_FILE = __file__.rstrip("co") + class PyTracer(Tracer): """Python implementation of the raw data tracer.""" @@ -126,17 +127,18 @@ def log(self, marker: str, *args: Any) -> None: """For hard-core logging of what this tracer is doing.""" with open("/tmp/debug_trace.txt", "a", encoding="utf-8") as f: f.write(f"{marker} {self.id}[{len(self.data_stack)}]") - if 0: # if you want thread ids.. - f.write(".{:x}.{:x}".format( # type: ignore[unreachable] - self.thread.ident, - self.threading.current_thread().ident, - )) + if 0: # if you want thread ids.. + f.write( + ".{:x}.{:x}".format( # type: ignore[unreachable] + self.thread.ident, + self.threading.current_thread().ident, + ) + ) f.write(" {}".format(" ".join(map(str, args)))) - if 0: # if you want callers.. - f.write(" | ") # type: ignore[unreachable] + if 0: # if you want callers.. 
+ f.write(" | ") # type: ignore[unreachable] stack = " / ".join( - (fname or "???").rpartition("/")[-1] - for _, fname, _, _ in self.data_stack + (fname or "???").rpartition("/")[-1] for _, fname, _, _ in self.data_stack ) f.write(stack) f.write("\n") @@ -145,8 +147,8 @@ def _trace( self, frame: FrameType, event: str, - arg: Any, # pylint: disable=unused-argument - lineno: TLineNo | None = None, # pylint: disable=unused-argument + arg: Any, # pylint: disable=unused-argument + lineno: TLineNo | None = None, # pylint: disable=unused-argument ) -> TTraceFn | None: """The trace function passed to sys.settrace.""" @@ -156,11 +158,11 @@ def _trace( # f = frame; code = f.f_code # self.log(":", f"{code.co_filename} {f.f_lineno} {code.co_name}()", event) - if (self.stopped and sys.gettrace() == self._cached_bound_method_trace): # pylint: disable=comparison-with-callable + if self.stopped and sys.gettrace() == self._cached_bound_method_trace: # pylint: disable=comparison-with-callable # The PyTrace.stop() method has been called, possibly by another # thread, let's deactivate ourselves now. if 0: - f = frame # type: ignore[unreachable] + f = frame # type: ignore[unreachable] self.log("---\nX", f.f_code.co_filename, f.f_lineno) while f: self.log(">", f.f_code.co_filename, f.f_lineno, f.f_code.co_name, f.f_trace) @@ -185,12 +187,12 @@ def _trace( if event == "call": # Should we start a new context? if self.should_start_context and self.context is None: - context_maybe = self.should_start_context(frame) # pylint: disable=not-callable + context_maybe = self.should_start_context(frame) # pylint: disable=not-callable if context_maybe is not None: self.context = context_maybe started_context = True assert self.switch_context is not None - self.switch_context(self.context) # pylint: disable=not-callable + self.switch_context(self.context) # pylint: disable=not-callable else: started_context = False else: @@ -279,7 +281,7 @@ def _trace( # It is a real return if we aren't going to resume next. if env.PYBEHAVIOR.lasti_is_yield: lasti += 2 - real_return = (code[lasti] != RESUME) + real_return = code[lasti] != RESUME else: if code[lasti] == RETURN_VALUE: real_return = True @@ -303,7 +305,7 @@ def _trace( if self.started_context: assert self.switch_context is not None self.context = None - self.switch_context(None) # pylint: disable=not-callable + self.switch_context(None) # pylint: disable=not-callable return self._cached_bound_method_trace @@ -338,22 +340,19 @@ def stop(self) -> None: # Called on a different thread than started us: we can't unhook # ourselves, but we've set the flag that we should stop, so we # won't do any more tracing. - #self.log("~", "stopping on different threads") + # self.log("~", "stopping on different threads") return # PyPy clears the trace function before running atexit functions, # so don't warn if we are in atexit on PyPy and the trace function # has changed to None. Metacoverage also messes this up, so don't # warn if we are measuring ourselves. 
- suppress_warning = ( - (env.PYPY and self.in_atexit and tf is None) - or env.METACOV - ) + suppress_warning = (env.PYPY and self.in_atexit and tf is None) or env.METACOV if self.warn and not suppress_warning: - if tf != self._cached_bound_method_trace: # pylint: disable=comparison-with-callable + if tf != self._cached_bound_method_trace: # pylint: disable=comparison-with-callable self.warn( - "Trace function changed, data is likely wrong: " + - f"{tf!r} != {self._cached_bound_method_trace!r}", + "Trace function changed, data is likely wrong: " + + f"{tf!r} != {self._cached_bound_method_trace!r}", slug="trace-changed", ) diff --git a/coverage/regions.py b/coverage/regions.py index 3b00c401a..e7792103d 100644 --- a/coverage/regions.py +++ b/coverage/regions.py @@ -15,6 +15,7 @@ @dataclasses.dataclass class Context: """The nested named context of a function or class.""" + name: str kind: str lines: set[int] @@ -27,6 +28,7 @@ class RegionFinder: attribute. """ + def __init__(self) -> None: self.regions: list[CodeRegion] = [] self.context: list[Context] = [] diff --git a/coverage/report.py b/coverage/report.py index f49cea1db..7322d3c89 100644 --- a/coverage/report.py +++ b/coverage/report.py @@ -74,10 +74,7 @@ def _report_text( Cover="{:>{n}}", Missing="{:>10}", ) - header_items = [ - formats[item].format(item, name_len=max_name, n=max_n) - for item in header - ] + header_items = [formats[item].format(item, name_len=max_name, n=max_n) for item in header] header_str = "".join(header_items) rule = "-" * len(header_str) @@ -89,8 +86,8 @@ def _report_text( for values in lines_values: # build string with line values line_items = [ - formats[item].format(str(value), - name_len=max_name, n=max_n-1) for item, value in zip(header, values) + formats[item].format(str(value), name_len=max_name, n=max_n - 1) + for item, value in zip(header, values) ] self.write_items(line_items) @@ -99,8 +96,8 @@ def _report_text( self.write(rule) line_items = [ - formats[item].format(str(value), - name_len=max_name, n=max_n-1) for item, value in zip(header, total_line) + formats[item].format(str(value), name_len=max_name, n=max_n - 1) + for item, value in zip(header, total_line) ] self.write_items(line_items) @@ -137,8 +134,9 @@ def _report_markdown( max_n = max(len(total_line[header.index("Cover")]) + 6, len(" Cover ")) header_items = [formats[item].format(item, name_len=max_name, n=max_n) for item in header] header_str = "".join(header_items) - rule_str = "|" + " ".join(["- |".rjust(len(header_items[0])-1, "-")] + - ["-: |".rjust(len(item)-1, "-") for item in header_items[1:]], + rule_str = "|" + " ".join( + ["- |".rjust(len(header_items[0]) - 1, "-")] + + ["-: |".rjust(len(item) - 1, "-") for item in header_items[1:]], ) # Write the header @@ -149,7 +147,7 @@ def _report_markdown( # build string with line values formats.update(dict(Cover="{:>{n}}% |")) line_items = [ - formats[item].format(str(value).replace("_", "\\_"), name_len=max_name, n=max_n-1) + formats[item].format(str(value).replace("_", "\\_"), name_len=max_name, n=max_n - 1) for item, value in zip(header, values) ] self.write_items(line_items) @@ -208,7 +206,7 @@ def tabular_report(self) -> None: # `lines_values` is list of lists of sortable values. lines_values = [] - for (fr, analysis) in self.fr_analysis: + for fr, analysis in self.fr_analysis: nums = analysis.numbers args = [fr.relative_filename(), nums.n_statements, nums.n_missing] @@ -250,7 +248,7 @@ def tabular_report(self) -> None: # Create other final lines. 
end_lines = [] if self.config.skip_covered and self.skipped_count: - file_suffix = "s" if self.skipped_count>1 else "" + file_suffix = "s" if self.skipped_count > 1 else "" end_lines.append( f"\n{self.skipped_count} file{file_suffix} skipped due to complete coverage.", ) diff --git a/coverage/report_core.py b/coverage/report_core.py index e19117f98..9405f33f4 100644 --- a/coverage/report_core.py +++ b/coverage/report_core.py @@ -65,7 +65,7 @@ def render_report( if file_to_close is not None: file_to_close.close() if delete_file: - file_be_gone(output_path) # pragma: part covered (doesn't return) + file_be_gone(output_path) # pragma: part covered (doesn't return) def get_analysis_to_report( @@ -101,7 +101,7 @@ def get_analysis_to_report( # explicitly suppress those errors. # NotPython is only raised by PythonFileReporter, which has a # should_be_python() method. - if fr.should_be_python(): # type: ignore[attr-defined] + if fr.should_be_python(): # type: ignore[attr-defined] if config.ignore_errors: msg = f"Couldn't parse Python file '{fr.filename}'" coverage._warn(msg, slug="couldnt-parse") diff --git a/coverage/results.py b/coverage/results.py index c27c47bde..163fc902e 100644 --- a/coverage/results.py +++ b/coverage/results.py @@ -41,9 +41,7 @@ def analysis_from_file_reporter( for fromno, tono in arc_possibilities_set: dests[fromno].add(tono) single_dests = { - fromno: list(tonos)[0] - for fromno, tonos in dests.items() - if len(tonos) == 1 + fromno: list(tonos)[0] for fromno, tonos in dests.items() if len(tonos) == 1 } new_arcs = set() for fromno, tono in arcs: @@ -99,8 +97,8 @@ def __post_init__(self) -> None: if self.has_arcs: n_branches = self._total_branches() mba = self.missing_branch_arcs() - n_partial_branches = sum(len(v) for k,v in mba.items() if k not in self.missing) - n_missing_branches = sum(len(v) for k,v in mba.items()) + n_partial_branches = sum(len(v) for k, v in mba.items() if k not in self.missing) + n_missing_branches = sum(len(v) for k, v in mba.items()) else: n_branches = n_partial_branches = n_missing_branches = 0 @@ -128,17 +126,12 @@ def narrow(self, lines: Container[TLineNo]) -> Analysis: if self.has_arcs: arc_possibilities_set = { - (a, b) for a, b in self.arc_possibilities_set - if a in lines or b in lines + (a, b) for a, b in self.arc_possibilities_set if a in lines or b in lines } arcs_executed_set = { - (a, b) for a, b in self.arcs_executed_set - if a in lines or b in lines - } - exit_counts = { - lno: num for lno, num in self.exit_counts.items() - if lno in lines + (a, b) for a, b in self.arcs_executed_set if a in lines or b in lines } + exit_counts = {lno: num for lno, num in self.exit_counts.items() if lno in lines} no_branch = {lno for lno in self.no_branch if lno in lines} else: arc_possibilities_set = set() @@ -177,16 +170,17 @@ def missing_formatted(self, branches: bool = False) -> str: def arcs_missing(self) -> list[TArc]: """Returns a sorted list of the un-executed arcs in the code.""" missing = ( - p for p in self.arc_possibilities - if p not in self.arcs_executed_set - and p[0] not in self.no_branch - and p[1] not in self.excluded + p + for p in self.arc_possibilities + if p not in self.arcs_executed_set + and p[0] not in self.no_branch + and p[1] not in self.excluded ) return sorted(missing) def _branch_lines(self) -> list[TLineNo]: """Returns a list of line numbers that have more than one exit.""" - return [l1 for l1,count in self.exit_counts.items() if count > 1] + return [l1 for l1, count in self.exit_counts.items() if count > 1] def 
_total_branches(self) -> int: """How many total branches are there?""" @@ -312,7 +306,7 @@ def __add__(self, other: Numbers) -> Numbers: def __radd__(self, other: int) -> Numbers: # Implementing 0+Numbers allows us to sum() a list of Numbers. - assert other == 0 # we only ever call it this way. + assert other == 0 # we only ever call it this way. return self @@ -324,7 +318,7 @@ def display_covered(pc: float, precision: int) -> str: result in either "0" or "100". """ - near0 = 1.0 / 10 ** precision + near0 = 1.0 / 10**precision if 0 < pc < near0: pc = near0 elif (100.0 - near0) < pc < 100: @@ -389,7 +383,7 @@ def format_lines( for line, exits in line_exits: for ex in sorted(exits): if line not in lines and ex not in lines: - dest = (ex if ex > 0 else "exit") + dest = ex if ex > 0 else "exit" line_items.append((line, f"{line}->{dest}")) ret = ", ".join(t[-1] for t in sorted(line_items)) diff --git a/coverage/sqldata.py b/coverage/sqldata.py index 5e3d67906..6de07c593 100644 --- a/coverage/sqldata.py +++ b/coverage/sqldata.py @@ -109,8 +109,10 @@ ); """ + def _locked(method: AnyCallable) -> AnyCallable: """A decorator for methods that should hold self._lock.""" + @functools.wraps(method) def _wrapped(self: CoverageData, *args: Any, **kwargs: Any) -> Any: if self._debug.should("lock"): @@ -119,6 +121,7 @@ def _wrapped(self: CoverageData, *args: Any, **kwargs: Any) -> Any: if self._debug.should("lock"): self._debug.write(f"Locked {self._lock!r} for {method.__name__}") return method(self, *args, **kwargs) + return _wrapped @@ -320,7 +323,8 @@ def _read_db(self) -> None: else: raise DataError( "Data file {!r} doesn't seem to be a coverage data file: {}".format( - self._filename, exc, + self._filename, + exc, ), ) from exc else: @@ -328,7 +332,9 @@ def _read_db(self) -> None: if schema_version != SCHEMA_VERSION: raise DataError( "Couldn't use data file {!r}: wrong schema: {} instead of {}".format( - self._filename, schema_version, SCHEMA_VERSION, + self._filename, + schema_version, + SCHEMA_VERSION, ), ) @@ -353,10 +359,12 @@ def _init_db(self, db: SqliteDb) -> None: ("version", __version__), ] if self._debug.should("process"): - meta_data.extend([ - ("sys_argv", str(getattr(sys, "argv", None))), - ("when", datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")), - ]) + meta_data.extend( + [ + ("sys_argv", str(getattr(sys, "argv", None))), + ("when", datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")), + ] + ) db.executemany_void("INSERT OR IGNORE INTO meta (key, value) VALUES (?, ?)", meta_data) def _connect(self) -> SqliteDb: @@ -366,7 +374,7 @@ def _connect(self) -> SqliteDb: return self._dbs[threading.get_ident()] def __bool__(self) -> bool: - if (threading.get_ident() not in self._dbs and not os.path.exists(self._filename)): + if threading.get_ident() not in self._dbs and not os.path.exists(self._filename): return False try: with self._connect() as con: @@ -504,9 +512,13 @@ def add_lines(self, line_data: Mapping[str, Collection[TLineNo]]) -> None: """ if self._debug.should("dataop"): - self._debug.write("Adding lines: %d files, %d lines total" % ( - len(line_data), sum(len(lines) for lines in line_data.values()), - )) + self._debug.write( + "Adding lines: %d files, %d lines total" + % ( + len(line_data), + sum(len(lines) for lines in line_data.values()), + ) + ) if self._debug.should("dataop2"): for filename, linenos in sorted(line_data.items()): self._debug.write(f" {filename}: {linenos}") @@ -544,9 +556,13 @@ def add_arcs(self, arc_data: Mapping[str, Collection[TArc]]) -> None: """ if 
self._debug.should("dataop"): - self._debug.write("Adding arcs: %d files, %d arcs total" % ( - len(arc_data), sum(len(arcs) for arcs in arc_data.values()), - )) + self._debug.write( + "Adding arcs: %d files, %d arcs total" + % ( + len(arc_data), + sum(len(arcs) for arcs in arc_data.values()), + ) + ) if self._debug.should("dataop2"): for filename, arcs in sorted(arc_data.items()): self._debug.write(f" {filename}: {arcs}") @@ -610,7 +626,9 @@ def add_file_tracers(self, file_tracers: Mapping[str, str]) -> None: if existing_plugin != plugin_name: raise DataError( "Conflicting file tracer name for '{}': {!r} vs {!r}".format( - filename, existing_plugin, plugin_name, + filename, + existing_plugin, + plugin_name, ), ) elif plugin_name: @@ -636,7 +654,7 @@ def touch_files(self, filenames: Collection[str], plugin_name: str | None = None if self._debug.should("dataop"): self._debug.write(f"Touching {filenames!r}") self._start_using() - with self._connect(): # Use this to get one transaction. + with self._connect(): # Use this to get one transaction. if not self._has_arcs and not self._has_lines: raise DataError("Can't touch files in an empty CoverageData") @@ -656,7 +674,6 @@ def purge_files(self, filenames: Collection[str]) -> None: self._debug.write(f"Purging data for {filenames!r}") self._start_using() with self._connect() as con: - if self._has_lines: sql = "DELETE FROM line_bits WHERE file_id=?" elif self._has_arcs: @@ -683,9 +700,11 @@ def update( """ if self._debug.should("dataop"): - self._debug.write("Updating with data from {!r}".format( - getattr(other_data, "_filename", "???"), - )) + self._debug.write( + "Updating with data from {!r}".format( + getattr(other_data, "_filename", "???"), + ) + ) if self._has_lines and other_data._has_arcs: raise DataError("Can't combine branch coverage data with statement data") if self._has_arcs and other_data._has_lines: @@ -709,7 +728,9 @@ def update( con.con.create_function("numbits_union", 2, numbits_union) con.con.create_function("map_path", 1, map_path) con.con.create_aggregate( - "numbits_union_agg", 1, NumbitsUnionAgg # type: ignore[arg-type] + "numbits_union_agg", + 1, + NumbitsUnionAgg, # type: ignore[arg-type] ) # Attach the other database @@ -740,7 +761,9 @@ def update( path, this_tracer, other_tracer = conflicts[0] raise DataError( "Conflicting file tracer name for '{}': {!r} vs {!r}".format( - path, this_tracer, other_tracer, + path, + this_tracer, + other_tracer, ), ) @@ -914,7 +937,7 @@ def file_tracer(self, filename: str) -> str | None: row = con.execute_one("SELECT tracer FROM tracer WHERE file_id = ?", (file_id,)) if row is not None: return row[0] or "" - return "" # File was measured, but no tracer associated. + return "" # File was measured, but no tracer associated. def set_query_context(self, context: str) -> None: """Set a context for subsequent querying. diff --git a/coverage/sqlitedb.py b/coverage/sqlitedb.py index 21826c801..c4c938728 100644 --- a/coverage/sqlitedb.py +++ b/coverage/sqlitedb.py @@ -28,6 +28,7 @@ class SqliteDb: etc(a, b) """ + def __init__(self, filename: str, debug: TDebugCtl, no_disk: bool = False) -> None: self.debug = debug self.filename = filename @@ -68,8 +69,9 @@ def _connect(self) -> None: # In Python 3.12+, we can change the config to allow journal_mode=off. if hasattr(sqlite3, "SQLITE_DBCONFIG_DEFENSIVE"): # Turn off defensive mode, so that journal_mode=off can succeed. 
- self.con.setconfig( # type: ignore[attr-defined, unused-ignore] - sqlite3.SQLITE_DBCONFIG_DEFENSIVE, False, + self.con.setconfig( # type: ignore[attr-defined, unused-ignore] + sqlite3.SQLITE_DBCONFIG_DEFENSIVE, + False, ) # This pragma makes writing faster. It disables rollbacks, but we never need them. @@ -97,7 +99,7 @@ def __enter__(self) -> SqliteDb: self.nest += 1 return self - def __exit__(self, exc_type, exc_value, traceback) -> None: # type: ignore[no-untyped-def] + def __exit__(self, exc_type, exc_value, traceback) -> None: # type: ignore[no-untyped-def] self.nest -= 1 if self.nest == 0: try: @@ -117,12 +119,12 @@ def _execute(self, sql: str, parameters: Iterable[Any]) -> sqlite3.Cursor: try: assert self.con is not None try: - return self.con.execute(sql, parameters) # type: ignore[arg-type] + return self.con.execute(sql, parameters) # type: ignore[arg-type] except Exception: # In some cases, an error might happen that isn't really an # error. Try again immediately. # https://github.com/nedbat/coveragepy/issues/1010 - return self.con.execute(sql, parameters) # type: ignore[arg-type] + return self.con.execute(sql, parameters) # type: ignore[arg-type] except sqlite3.Error as exc: msg = str(exc) if not self.no_disk: @@ -133,8 +135,8 @@ def _execute(self, sql: str, parameters: Iterable[Any]) -> sqlite3.Cursor: cov4_sig = b"!coverage.py: This is a private format" if bad_file.read(len(cov4_sig)) == cov4_sig: msg = ( - "Looks like a coverage 4.x data file. " + - "Are you mixing versions of coverage?" + "Looks like a coverage 4.x data file. " + + "Are you mixing versions of coverage?" ) except Exception: pass @@ -222,9 +224,12 @@ def executemany_void(self, sql: str, data: list[Any]) -> None: def executescript(self, script: str) -> None: """Same as :meth:`python:sqlite3.Connection.executescript`.""" if self.debug.should("sql"): - self.debug.write("Executing script with {} chars: {}".format( - len(script), clipped_repr(script, 100), - )) + self.debug.write( + "Executing script with {} chars: {}".format( + len(script), + clipped_repr(script, 100), + ) + ) assert self.con is not None self.con.executescript(script).close() diff --git a/coverage/sysmon.py b/coverage/sysmon.py index 24be6cbb8..df184a822 100644 --- a/coverage/sysmon.py +++ b/coverage/sysmon.py @@ -294,9 +294,7 @@ def get_stats(self) -> dict[str, int] | None: return self.stats @panopticon("code", "@") - def sysmon_py_start( - self, code: CodeType, instruction_offset: TOffset - ) -> MonitorReturn: + def sysmon_py_start(self, code: CodeType, instruction_offset: TOffset) -> MonitorReturn: """Handle sys.monitoring.events.PY_START events.""" # Entering a new frame. Decide if we should trace in this file. self._activity = True diff --git a/coverage/templite.py b/coverage/templite.py index 73ef54f5d..2cfcc7eb1 100644 --- a/coverage/templite.py +++ b/coverage/templite.py @@ -18,11 +18,13 @@ class TempliteSyntaxError(ValueError): """Raised when a template has a syntax error.""" + pass class TempliteValueError(ValueError): """Raised when an expression won't evaluate in a template.""" + pass @@ -50,7 +52,7 @@ def add_section(self) -> CodeBuilder: self.code.append(section) return section - INDENT_STEP = 4 # PEP8 says so! + INDENT_STEP = 4 # PEP8 says so! def indent(self) -> None: """Increase the current indent for following lines.""" @@ -114,6 +116,7 @@ class Templite: }) """ + def __init__(self, text: str, *contexts: dict[str, Any]) -> None: """Construct a Templite with the given `text`. 
diff --git a/coverage/tomlconfig.py b/coverage/tomlconfig.py index 57392b7fb..4120ec9e1 100644 --- a/coverage/tomlconfig.py +++ b/coverage/tomlconfig.py @@ -19,6 +19,7 @@ if env.PYVERSION >= (3, 11, 0, "alpha", 7): import tomllib # pylint: disable=import-error + has_tomllib = True else: # TOML support on Python 3.10 and below is an install-time extra option. @@ -27,11 +28,13 @@ class TomlDecodeError(Exception): """An exception class that exists even when toml isn't installed.""" + pass TWant = TypeVar("TWant") + class TomlConfigParser: """TOML file reading with the interface of HandyConfigParser.""" diff --git a/coverage/tracer.pyi b/coverage/tracer.pyi index d850493ed..8ac412f57 100644 --- a/coverage/tracer.pyi +++ b/coverage/tracer.pyi @@ -9,6 +9,7 @@ from coverage.types import TFileDisposition, TTraceData, TTraceFn, Tracer class CFileDisposition(TFileDisposition): """CFileDisposition is in ctracer/filedisp.c""" + canonical_filename: Any file_tracer: Any has_dynamic_filename: Any @@ -20,6 +21,7 @@ class CFileDisposition(TFileDisposition): class CTracer(Tracer): """CTracer is in ctracer/tracer.c""" + check_include: Any concur_id_func: Any data: TTraceData diff --git a/coverage/types.py b/coverage/types.py index 3c32a5e38..7be1d0179 100644 --- a/coverage/types.py +++ b/coverage/types.py @@ -33,16 +33,18 @@ ## Python tracing + class TTraceFn(Protocol): """A Python trace function.""" + def __call__( self, frame: FrameType, event: str, arg: Any, lineno: TLineNo | None = None, # Our own twist, see collector.py - ) -> TTraceFn | None: - ... + ) -> TTraceFn | None: ... + ## Coverage.py tracing @@ -54,6 +56,7 @@ def __call__( TArc = tuple[TLineNo, TLineNo] + class TFileDisposition(Protocol): """A simple value type for recording what to do with a file.""" @@ -83,6 +86,7 @@ class TFileDisposition(Protocol): TCheckIncludeFn = Callable[[str, FrameType], bool] TShouldStartContextFn = Callable[[FrameType], Union[str, None]] + class Tracer(Protocol): """Anything that can report on Python execution.""" @@ -96,8 +100,7 @@ class Tracer(Protocol): unlock_data: Callable[[], None] warn: TWarnFn - def __init__(self) -> None: - ... + def __init__(self) -> None: ... def start(self) -> TTraceFn | None: """Start this tracer, return a trace function if based on sys.settrace.""" @@ -130,6 +133,7 @@ def get_stats(self) -> dict[str, int] | None: TConfigSectionIn = Mapping[str, TConfigValueIn] TConfigSectionOut = Mapping[str, TConfigValueOut] + class TConfigurable(Protocol): """Something that can proxy to the coverage configuration settings.""" @@ -155,6 +159,7 @@ def set_option(self, option_name: str, value: TConfigValueIn | TConfigSectionIn) """ + class TPluginConfig(Protocol): """Something that can provide options to a plugin.""" @@ -171,18 +176,21 @@ def get_plugin_options(self, plugin: str) -> TConfigSectionOut: ## Plugins + class TPlugin(Protocol): """What all plugins have in common.""" + _coverage_plugin_name: str _coverage_enabled: bool ## Debugging + class TWarnFn(Protocol): """A callable warn() function.""" - def __call__(self, msg: str, slug: str | None = None, once: bool = False) -> None: - ... + + def __call__(self, msg: str, slug: str | None = None, once: bool = False) -> None: ... 
class TDebugCtl(Protocol): diff --git a/coverage/version.py b/coverage/version.py index ce20ac49b..b7ed4c3d4 100644 --- a/coverage/version.py +++ b/coverage/version.py @@ -40,9 +40,8 @@ def _make_url( dev: int = 0, ) -> str: """Make the URL people should start at for this version of coverage.py.""" - return ( - "https://coverage.readthedocs.io/en/" - + _make_version(major, minor, micro, releaselevel, serial, dev) + return "https://coverage.readthedocs.io/en/" + _make_version( + major, minor, micro, releaselevel, serial, dev ) diff --git a/coverage/xmlreport.py b/coverage/xmlreport.py index 8d3e48fb6..87055f27c 100644 --- a/coverage/xmlreport.py +++ b/coverage/xmlreport.py @@ -42,6 +42,7 @@ def rate(hit: int, num: int) -> str: @dataclass class PackageData: """Data we keep about each "package" (in Java terms).""" + elements: dict[str, xml.dom.minidom.Element] hits: int lines: int @@ -96,10 +97,12 @@ def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str] | None = None) xcoverage = self.xml_out.documentElement assert xcoverage is not None xcoverage.setAttribute("version", __version__) - xcoverage.setAttribute("timestamp", str(int(time.time()*1000))) - xcoverage.appendChild(self.xml_out.createComment( - f" Generated by coverage.py: {__url__} ", - )) + xcoverage.setAttribute("timestamp", str(int(time.time() * 1000))) + xcoverage.appendChild( + self.xml_out.createComment( + f" Generated by coverage.py: {__url__} ", + ) + ) xcoverage.appendChild(self.xml_out.createComment(f" Based on {DTD_URL} ")) # Call xml_file for each file in the data. @@ -182,14 +185,14 @@ def xml_file(self, fr: FileReporter, analysis: Analysis, has_arcs: bool) -> None if not self.config.relative_files: source_path = files.canonical_filename(source_path) if filename.startswith(source_path.replace("\\", "/") + "/"): - rel_name = filename[len(source_path)+1:] + rel_name = filename[len(source_path) + 1 :] break else: rel_name = fr.relative_filename().replace("\\", "/") - self.source_paths.add(fr.filename[:-len(rel_name)].rstrip(r"\/")) + self.source_paths.add(fr.filename[: -len(rel_name)].rstrip(r"\/")) dirname = os.path.dirname(rel_name) or "." - dirname = "/".join(dirname.split("/")[:self.config.xml_package_depth]) + dirname = "/".join(dirname.split("/")[: self.config.xml_package_depth]) package_name = dirname.replace("/", ".") package = self.packages.setdefault(package_name, PackageData({}, 0, 0, 0, 0)) @@ -223,7 +226,7 @@ def xml_file(self, fr: FileReporter, analysis: Analysis, has_arcs: bool) -> None xline.setAttribute("branch", "true") xline.setAttribute( "condition-coverage", - "%d%% (%d/%d)" % (100*taken//total, taken, total), + "%d%% (%d/%d)" % (100 * taken // total, taken, total), ) if line in missing_branch_arcs: annlines = ["exit" if b < 0 else str(b) for b in missing_branch_arcs[line]] diff --git a/doc/cog_helpers.py b/doc/cog_helpers.py index 70161ab0d..4a3f7de0f 100644 --- a/doc/cog_helpers.py +++ b/doc/cog_helpers.py @@ -12,6 +12,7 @@ # pylint: disable=wrong-import-position import os + os.environ["COLUMNS"] = "80" import contextlib @@ -19,7 +20,7 @@ import re import textwrap -import cog # pylint: disable=import-error +import cog # pylint: disable=import-error from coverage.cmdline import CoverageScript from coverage.config import read_coverage_config diff --git a/doc/conf.py b/doc/conf.py index cadd1ac11..a84ef6288 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -23,24 +23,24 @@ # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. 
If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. -#sys.path.append(os.path.abspath('.')) +# sys.path.append(os.path.abspath('.')) # on_rtd is whether we are on readthedocs.org -on_rtd = os.getenv('READTHEDOCS') == 'True' +on_rtd = os.getenv("READTHEDOCS") == "True" # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.todo', - 'sphinx.ext.ifconfig', - 'sphinx.ext.intersphinx', - 'sphinxcontrib.restbuilder', - 'sphinx.ext.napoleon', - 'sphinx_code_tabs', - 'sphinx_rtd_theme', + "sphinx.ext.autodoc", + "sphinx.ext.todo", + "sphinx.ext.ifconfig", + "sphinx.ext.intersphinx", + "sphinxcontrib.restbuilder", + "sphinx.ext.napoleon", + "sphinx_code_tabs", + "sphinx_rtd_theme", ] autodoc_typehints = "description" @@ -49,23 +49,23 @@ templates_path = [] # The suffix of source filenames. -source_suffix = {'.rst': 'restructuredtext'} +source_suffix = {".rst": "restructuredtext"} # The encoding of source files. -#source_encoding = 'utf-8' +# source_encoding = 'utf-8' # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. -project = 'Coverage.py' +project = "Coverage.py" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # @@@ editable -copyright = "2009–2025, Ned Batchelder" # pylint: disable=redefined-builtin +copyright = "2009–2025, Ned Batchelder" # pylint: disable=redefined-builtin # The short X.Y.Z version. version = "7.10.4" # The full version, including alpha/beta/rc tags. @@ -86,43 +86,43 @@ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. -#language = None +# language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: -#today = '' +# today = '' # Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' +# today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. -#unused_docs = [] +# unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_patterns = ["_build", "help/*"] # The reST default role (used for this markup: `text`) to use for all documents. -#default_role = None +# default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True +# add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). -#add_module_names = True +# add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. -#show_authors = False +# show_authors = False # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. 
-#modindex_common_prefix = [] +# modindex_common_prefix = [] intersphinx_mapping = { - 'python': ('https://docs.python.org/3', None), + "python": ("https://docs.python.org/3", None), } nitpick_ignore = [ @@ -141,49 +141,49 @@ # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. -html_theme = 'sphinx_rtd_theme' +html_theme = "sphinx_rtd_theme" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. -#html_theme_options = {} -#html_style = "neds.css" -#html_add_permalinks = "" +# html_theme_options = {} +# html_style = "neds.css" +# html_add_permalinks = "" # Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = ['_templates'] +# html_theme_path = ['_templates'] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +# html_title = None # A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None +# html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. -html_logo = 'media/sleepy-snake-circle-150.png' +html_logo = "media/sleepy-snake-circle-150.png" # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. -#html_favicon = None +# html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' +# html_last_updated_fmt = '%b %d, %Y' # Custom sidebar templates, maps document names to template names. -#html_sidebars = {} +# html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. -#html_additional_pages = {} +# html_additional_pages = {} # If false, no module index is generated. html_use_modindex = False @@ -192,7 +192,7 @@ html_use_index = False # If true, the index is split into individual pages for each letter. -#html_split_index = False +# html_split_index = False # If true, links to the reST sources are added to the pages. html_show_sourcelink = False @@ -200,21 +200,21 @@ # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. -#html_use_opensearch = '' +# html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = '.htm' +# html_file_suffix = '.htm' # Output file base name for HTML help builder. -htmlhelp_basename = 'coveragepydoc' +htmlhelp_basename = "coveragepydoc" # -- Spelling --- if any("spell" in arg for arg in sys.argv): # sphinxcontrib.spelling needs the native "enchant" library, which often is # missing, so only use the extension if we are specifically spell-checking. 
- extensions += ['sphinxcontrib.spelling'] - names_file = tempfile.NamedTemporaryFile(mode='w', prefix="coverage_names_", suffix=".txt") + extensions += ["sphinxcontrib.spelling"] + names_file = tempfile.NamedTemporaryFile(mode="w", prefix="coverage_names_", suffix=".txt") with open("../CONTRIBUTORS.txt", encoding="utf-8") as contributors: names = set(re.split(r"[^\w']", contributors.read())) names = [n for n in names if len(n) >= 2 and n[0].isupper()] @@ -222,7 +222,7 @@ names_file.flush() atexit.register(os.remove, names_file.name) - spelling_word_list_filename = ['dict.txt', names_file.name] + spelling_word_list_filename = ["dict.txt", names_file.name] spelling_show_suggestions = False @@ -233,11 +233,11 @@ r"https://github.com/nedbat/coveragepy/(issues|pull)/\d+", # When publishing a new version, the docs will refer to the version before # the docs have been published. So don't check those links. - fr"https://coverage.readthedocs.io/en/{release}$", + rf"https://coverage.readthedocs.io/en/{release}$", ] # https://github.com/executablebooks/sphinx-tabs/pull/54 -sphinx_tabs_valid_builders = ['linkcheck'] +sphinx_tabs_valid_builders = ["linkcheck"] # When auto-doc'ing a class, only write the class' docstring into the class docs, # don't automatically include the __init__ docstring. @@ -245,8 +245,9 @@ prerelease = bool(max(release).isalpha()) + def setup(app): """Configure Sphinx""" - app.add_css_file('coverage.css') - app.add_config_value('prerelease', False, 'env') + app.add_css_file("coverage.css") + app.add_config_value("prerelease", False, "env") print("** Prerelease = %r" % prerelease) diff --git a/igor.py b/igor.py index 24b339aa8..54ac8d1bf 100644 --- a/igor.py +++ b/igor.py @@ -317,7 +317,7 @@ def print_banner(label): """Print the version of Python.""" impl = platform.python_implementation() version = platform.python_version() - has_gil = getattr(sys, '_is_gil_enabled', lambda: True)() + has_gil = getattr(sys, "_is_gil_enabled", lambda: True)() if not has_gil: version += "t" if PYPY: @@ -331,7 +331,10 @@ def print_banner(label): def do_quietly(command): """Run a command in a shell, and suppress all output.""" proc = subprocess.run( - command, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, + command, + shell=True, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, ) return proc.returncode @@ -386,7 +389,9 @@ def do_edit_for_release(): # NOTICE.txt update_file( - "NOTICE.txt", r"Copyright 2004.*? Ned", f"Copyright 2004-{facts.now:%Y} Ned", + "NOTICE.txt", + r"Copyright 2004.*? Ned", + f"Copyright 2004-{facts.now:%Y} Ned", ) # CHANGES.rst @@ -413,13 +418,16 @@ def do_edit_for_release(): ) update_file("doc/conf.py", r"(?s)# @@@ editable\n.*# @@@ end\n", new_conf) + def do_release_version(): """Set the version to 'final' for a release.""" facts = get_release_facts() rel_vi = facts.vi[:3] + ("final", 0) rel_version = f"version_info = {rel_vi}\n_dev = 0".replace("'", '"') update_file( - "coverage/version.py", r"(?m)^version_info = .*\n_dev = \d+$", rel_version, + "coverage/version.py", + r"(?m)^version_info = .*\n_dev = \d+$", + rel_version, ) @@ -437,7 +445,9 @@ def do_bump_version(): # coverage/version.py next_version = f"version_info = {facts.next_vi}\n_dev = 1".replace("'", '"') update_file( - "coverage/version.py", r"(?m)^version_info = .*\n_dev = \d+$", next_version, + "coverage/version.py", + r"(?m)^version_info = .*\n_dev = \d+$", + next_version, ) @@ -484,6 +494,7 @@ def do_cheats(): def do_copy_with_hash(*args): """Copy files with a cache-busting hash. 
Used in tests/gold/html/Makefile.""" from coverage.html import copy_with_cache_bust + *srcs, dest_dir = args for src in srcs: copy_with_cache_bust(src, dest_dir) diff --git a/lab/branches.py b/lab/branches.py index c2b838dda..6c251f28d 100644 --- a/lab/branches.py +++ b/lab/branches.py @@ -3,6 +3,7 @@ # Demonstrate some issues with coverage.py branch testing. + def my_function(x): """This isn't real code, just snippets...""" @@ -41,7 +42,7 @@ def my_function(x): if x < 1000: # This branch is always taken print("x is reasonable") - else: # pragma: nocover + else: # pragma: nocover print("this never happens") # try-except structures are complex branches. An except clause with a diff --git a/lab/extract_code.py b/lab/extract_code.py index cf32c1730..ece9cc724 100644 --- a/lab/extract_code.py +++ b/lab/extract_code.py @@ -61,12 +61,12 @@ def f(a, b): if "'''" in line or '"""' in line: break -for end in range(lineno+1, len(lines)): +for end in range(lineno + 1, len(lines)): line = lines[end] if "'''" in line or '"""' in line: break -code = "".join(lines[start+1: end]) +code = "".join(lines[start + 1 : end]) code = textwrap.dedent(code) print(code, end="") diff --git a/lab/goals.py b/lab/goals.py index 13f3f68a5..1ebdb043f 100644 --- a/lab/goals.py +++ b/lab/goals.py @@ -19,9 +19,9 @@ import json import sys -from wcmatch import fnmatch as wcfnmatch # python -m pip install wcmatch +from wcmatch import fnmatch as wcfnmatch # python -m pip install wcmatch -from coverage.results import Numbers # Note: an internal class! +from coverage.results import Numbers # Note: an internal class! def select_files(files, pat): @@ -29,6 +29,7 @@ def select_files(files, pat): selected = [f for f in files if wcfnmatch.fnmatch(f, pat, flags=flags)] return selected + def total_for_files(data, files): total = Numbers(precision=3) for f in files: @@ -44,11 +45,14 @@ def total_for_files(data, files): return total + def main(argv): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument("--file", "-f", action="store_true", help="Check each file individually") parser.add_argument("--group", "-g", action="store_true", help="Check a group of files") - parser.add_argument("--verbose", "-v", action="store_true", help="Be chatty about what's happening") + parser.add_argument( + "--verbose", "-v", action="store_true", help="Be chatty about what's happening" + ) parser.add_argument("goal", type=float, help="Coverage goal") parser.add_argument("pattern", type=str, nargs="+", help="Patterns to check") args = parser.parse_args(argv) @@ -91,5 +95,6 @@ def main(argv): return 0 if ok else 2 + if __name__ == "__main__": sys.exit(main(sys.argv[1:])) diff --git a/lab/hack_pyc.py b/lab/hack_pyc.py index 60b8459b5..7660c998e 100644 --- a/lab/hack_pyc.py +++ b/lab/hack_pyc.py @@ -1,14 +1,15 @@ # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt -""" Wicked hack to get .pyc files to do bytecode tracing instead of - line tracing. +"""Wicked hack to get .pyc files to do bytecode tracing instead of +line tracing. """ import marshal, new, opcode, sys, types from lnotab import lnotab_numbers, lnotab_string + class PycFile: def read(self, f): if isinstance(f, basestring): @@ -27,10 +28,11 @@ def write(self, f): def hack_line_numbers(self): self.code = hack_line_numbers(self.code) + def hack_line_numbers(code): - """ Replace a code object's line number information to claim that every - byte of the bytecode is a new source line. 
Returns a new code - object. Also recurses to hack the line numbers in nested code objects. + """Replace a code object's line number information to claim that every + byte of the bytecode is a new source line. Returns a new code + object. Also recurses to hack the line numbers in nested code objects. """ # Create a new lnotab table. Each opcode is claimed to be at @@ -46,7 +48,7 @@ def hack_line_numbers(code): if old_num and i_byte == old_num[0][0]: line = old_num.pop(0)[1] opnum_in_line = 0 - new_num.append((i_byte, 100000000 + 1000*line + opnum_in_line)) + new_num.append((i_byte, 100000000 + 1000 * line + opnum_in_line)) if ord(code.co_code[i_byte]) >= opcode.HAVE_ARGUMENT: i_byte += 3 else: @@ -54,7 +56,7 @@ def hack_line_numbers(code): opnum_in_line += 1 # new_num is a list of pairs, (byteoff, lineoff). Turn it into an lnotab. - new_firstlineno = new_num[0][1]-1 + new_firstlineno = new_num[0][1] - 1 new_lnotab = lnotab_string(new_num, new_firstlineno) # Recurse into code constants in this code object. @@ -68,18 +70,29 @@ def hack_line_numbers(code): # Create a new code object, just like the old one, except with new # line numbers. new_code = new.code( - code.co_argcount, code.co_nlocals, code.co_stacksize, code.co_flags, - code.co_code, tuple(new_consts), code.co_names, code.co_varnames, - code.co_filename, code.co_name, new_firstlineno, new_lnotab + code.co_argcount, + code.co_nlocals, + code.co_stacksize, + code.co_flags, + code.co_code, + tuple(new_consts), + code.co_names, + code.co_varnames, + code.co_filename, + code.co_name, + new_firstlineno, + new_lnotab, ) return new_code + def hack_file(f): pyc = PycFile() pyc.read(f) pyc.hack_line_numbers() pyc.write(f) -if __name__ == '__main__': + +if __name__ == "__main__": hack_file(sys.argv[1]) diff --git a/lab/parser.py b/lab/parser.py index 4e817aea6..8c84269c6 100644 --- a/lab/parser.py +++ b/lab/parser.py @@ -3,7 +3,6 @@ """Parser.py: a main for invoking code in coverage/parser.py""" - import collections import dis import glob @@ -25,26 +24,13 @@ def main(self, args): """A main function for trying the code from the command line.""" parser = optparse.OptionParser() + parser.add_option("-d", action="store_true", dest="dis", help="Disassemble") parser.add_option( - "-d", action="store_true", dest="dis", - help="Disassemble" - ) - parser.add_option( - "-R", action="store_true", dest="recursive", - help="Recurse to find source files" - ) - parser.add_option( - "-q", action="store_true", dest="quiet", - help="Suppress output" - ) - parser.add_option( - "-s", action="store_true", dest="source", - help="Show analyzed source" - ) - parser.add_option( - "-t", action="store_true", dest="tokens", - help="Show tokens" + "-R", action="store_true", dest="recursive", help="Recurse to find source files" ) + parser.add_option("-q", action="store_true", dest="quiet", help="Suppress output") + parser.add_option("-s", action="store_true", dest="source", help="Show analyzed source") + parser.add_option("-t", action="store_true", dest="tokens", help="Show tokens") options, args = parser.parse_args() if options.recursive: @@ -77,7 +63,7 @@ def one_file(self, options, filename): text = get_python_source(filename) if start is not None: lines = text.splitlines(True) - text = textwrap.dedent("".join(lines[start-1:end]).replace("\\\\", "\\")) + text = textwrap.dedent("".join(lines[start - 1 : end]).replace("\\\\", "\\")) pyparser = PythonParser(text, filename=filename, exclude=r"no\s*cover") pyparser.parse_source() except Exception as err: @@ -102,25 +88,25 @@ 
def one_file(self, options, filename): exit_counts = pyparser.exit_counts() for lineno, ltext in enumerate(pyparser.text.splitlines(), start=1): - marks = [' '] * 6 - a = ' ' + marks = [" "] * 6 + a = " " if lineno in pyparser.raw_statements: - marks[0] = '-' + marks[0] = "-" if lineno in pyparser.statements: - marks[1] = '=' + marks[1] = "=" exits = exit_counts.get(lineno, 0) if exits > 1: marks[2] = str(exits) if lineno in pyparser.raw_docstrings: marks[3] = '"' if lineno in pyparser.raw_excluded: - marks[4] = 'X' + marks[4] = "X" elif lineno in pyparser.excluded: - marks[4] = '×' + marks[4] = "×" if lineno in pyparser._multiline.values(): - marks[5] = 'o' + marks[5] = "o" elif lineno in pyparser._multiline.keys(): - marks[5] = '.' + marks[5] = "." if arc_chars: a = arc_chars[lineno].ljust(arc_width) @@ -141,34 +127,31 @@ def arc_ascii_art(self, arcs): arc_chars = collections.defaultdict(str) for lfrom, lto in sorted(arcs): if lfrom < 0: - arc_chars[lto] += 'v' + arc_chars[lto] += "v" elif lto < 0: - arc_chars[lfrom] += '^' + arc_chars[lfrom] += "^" else: if lfrom == lto - 1: plus_ones.add(lfrom) - arc_chars[lfrom] += "" # ensure this line is in arc_chars + arc_chars[lfrom] += "" # ensure this line is in arc_chars continue if lfrom < lto: l1, l2 = lfrom, lto else: l1, l2 = lto, lfrom - w = first_all_blanks(arc_chars[l] for l in range(l1, l2+1)) - for l in range(l1, l2+1): + w = first_all_blanks(arc_chars[l] for l in range(l1, l2 + 1)) + for l in range(l1, l2 + 1): if l == lfrom: - ch = '<' + ch = "<" elif l == lto: - ch = '>' + ch = ">" else: - ch = '|' + ch = "|" arc_chars[l] = set_char(arc_chars[l], w, ch) # Add the plusses as the first character for lineno, arcs in arc_chars.items(): - arc_chars[lineno] = ( - ("+" if lineno in plus_ones else " ") + - arcs - ) + arc_chars[lineno] = ("+" if lineno in plus_ones else " ") + arcs return arc_chars @@ -213,7 +196,7 @@ def disassemble(text): def set_char(s, n, c): """Set the nth char of s to be c, extending s if needed.""" s = s.ljust(n) - return s[:n] + c + s[n+1:] + return s[:n] + c + s[n + 1 :] def blanks(s): @@ -233,5 +216,5 @@ def first_all_blanks(ss): return max(len(s) for s in ss) -if __name__ == '__main__': +if __name__ == "__main__": ParserMain().main(sys.argv[1:]) diff --git a/lab/run_sysmon.py b/lab/run_sysmon.py index fa7d44d7b..2747fb63a 100644 --- a/lab/run_sysmon.py +++ b/lab/run_sysmon.py @@ -36,15 +36,18 @@ def bytes_to_lines(code): | events.JUMP ) + def show_off(label, code, instruction_offset): if code.co_filename == the_program: b2l = bytes_to_lines(code) print(f"{label}: {code.co_filename}@{instruction_offset} #{b2l[instruction_offset]}") + def show_line(label, code, line_number): if code.co_filename == the_program: print(f"{label}: {code.co_filename} #{line_number}") + def show_off_off(label, code, instruction_offset, destination_offset): if code.co_filename == the_program: b2l = bytes_to_lines(code) @@ -53,6 +56,7 @@ def show_off_off(label, code, instruction_offset, destination_offset): + f"#{b2l[instruction_offset]}->{b2l[destination_offset]}" ) + def sysmon_py_start(code, instruction_offset): show_off("PY_START", code, instruction_offset) sys.monitoring.set_local_events( diff --git a/lab/run_trace.py b/lab/run_trace.py index dbcd43e49..c2276b62b 100644 --- a/lab/run_trace.py +++ b/lab/run_trace.py @@ -7,6 +7,7 @@ nest = 0 + def trace(frame, event, arg): global nest @@ -15,21 +16,25 @@ def trace(frame, event, arg): return None if the_program in frame.f_code.co_filename: - print("%s%s %s %d @%d" % ( - " " * nest, - 
event, - os.path.basename(frame.f_code.co_filename), - frame.f_lineno, - frame.f_lasti, - )) - - if event == 'call': + print( + "%s%s %s %d @%d" + % ( + " " * nest, + event, + os.path.basename(frame.f_code.co_filename), + frame.f_lineno, + frame.f_lasti, + ) + ) + + if event == "call": nest += 1 - if event == 'return': + if event == "return": nest -= 1 return trace + print(sys.version) the_program = sys.argv[1] diff --git a/lab/show_pyc.py b/lab/show_pyc.py index 777f9238b..8272b02f1 100644 --- a/lab/show_pyc.py +++ b/lab/show_pyc.py @@ -19,13 +19,12 @@ import warnings - def show_pyc_file(fname): f = open(fname, "rb") magic = f.read(4) print("magic %s" % (binascii.hexlify(magic))) read_date_and_size = True - flags = struct.unpack(' None: """coverage/files.py has some unfortunate globals. Reset them every test.""" set_relative_directory() + @pytest.fixture(autouse=True) def force_local_pyc_files() -> None: """Ensure that .pyc files are written next to source files.""" diff --git a/tests/coveragetest.py b/tests/coveragetest.py index b040b89c1..9c5233616 100644 --- a/tests/coveragetest.py +++ b/tests/coveragetest.py @@ -117,10 +117,10 @@ def start_import_stop( # understand the difference. cov.start() - try: # pragma: nested + try: # pragma: nested # Import the Python file, executing it. mod = import_local_file(modname, modfile) - finally: # pragma: nested + finally: # pragma: nested # Stop coverage.py. cov.stop() return mod @@ -130,15 +130,15 @@ def get_report(self, cov: Coverage, squeeze: bool = True, **kwargs: Any) -> str: repout = io.StringIO() kwargs.setdefault("show_missing", False) cov.report(file=repout, **kwargs) - report = repout.getvalue().replace('\\', '/') - print(report) # When tests fail, it's helpful to see the output + report = repout.getvalue().replace("\\", "/") + print(report) # When tests fail, it's helpful to see the output if squeeze: report = re.sub(r" +", " ", report) return report def get_module_name(self) -> str: """Return a random module name to use for this test run.""" - self.last_module_name = 'coverage_test_' + str(random.random())[2:] + self.last_module_name = "coverage_test_" + str(random.random())[2:] return self.last_module_name def check_coverage( @@ -169,7 +169,7 @@ def check_coverage( Returns the Coverage object, in case you want to poke at it some more. """ - __tracebackhide__ = True # pytest, please don't show me this function. + __tracebackhide__ = True # pytest, please don't show me this function. # We write the code into a file so that we can import it. # Coverage.py wants to deal with things as modules with file names. @@ -189,7 +189,7 @@ def check_coverage( for exc in excludes or []: cov.exclude(exc) for par in partials or []: - cov.exclude(par, which='partial') + cov.exclude(par, which="partial") mod = self.start_import_stop(cov, modname) @@ -207,7 +207,7 @@ def check_coverage( else: # lines is a list of possible line number lists, one of them # must match. - for i, line_list in enumerate(lines): # pylint: disable=unused-variable + for i, line_list in enumerate(lines): # pylint: disable=unused-variable if statements == line_list: # PYVERSIONS: we might be able to trim down multiple # lines passed into this function. 
@@ -307,10 +307,11 @@ def assert_warnings( """ __tracebackhide__ = True saved_warnings = [] + def capture_warning( msg: str, slug: str | None = None, - once: bool = False, # pylint: disable=unused-argument + once: bool = False, # pylint: disable=unused-argument ) -> None: """A fake implementation of Coverage._warn, to capture warnings.""" # NOTE: we don't implement `once`. @@ -319,11 +320,11 @@ def capture_warning( saved_warnings.append(msg) original_warn = cov._warn - cov._warn = capture_warning # type: ignore[method-assign] + cov._warn = capture_warning # type: ignore[method-assign] try: yield - except: # pylint: disable=try-except-raise + except: # pylint: disable=try-except-raise raise else: if warnings: @@ -344,7 +345,7 @@ def capture_warning( if saved_warnings: assert False, f"Unexpected warnings: {saved_warnings!r}" finally: - cov._warn = original_warn # type: ignore[method-assign] + cov._warn = original_warn # type: ignore[method-assign] def assert_same_files(self, flist1: Iterable[str], flist2: Iterable[str]) -> None: """Assert that `flist1` and `flist2` are the same set of file names.""" @@ -485,13 +486,13 @@ def working_root(self) -> str: def report_from_command(self, cmd: str) -> str: """Return the report from the `cmd`, with some convenience added.""" - report = self.run_command(cmd).replace('\\', '/') + report = self.run_command(cmd).replace("\\", "/") assert "error" not in report.lower() return report def report_lines(self, report: str) -> list[str]: """Return the lines of the report, as a list.""" - lines = report.split('\n') + lines = report.split("\n") assert lines[-1] == "" return lines[:-1] @@ -514,8 +515,7 @@ def get_measured_filenames(self, coverage_data: CoverageData) -> dict[str, str]: Returns a dict of {filename: absolute path to file} for given CoverageData. """ - return {os.path.basename(filename): filename - for filename in coverage_data.measured_files()} + return {os.path.basename(filename): filename for filename in coverage_data.measured_files()} def get_missing_arc_description(self, cov: Coverage, start: TLineNo, end: TLineNo) -> str: """Get the missing-arc description for a line arc in a coverage run.""" @@ -531,7 +531,7 @@ class UsingModulesMixin: """A mixin for importing modules from tests/modules and tests/moremodules.""" def setUp(self) -> None: - super().setUp() # type: ignore[misc] + super().setUp() # type: ignore[misc] # Parent class saves and restores sys.path, we can just modify it. sys.path.append(nice_file(TESTS_DIR, "modules")) diff --git a/tests/goldtest.py b/tests/goldtest.py index 4ff8276f2..53747b27b 100644 --- a/tests/goldtest.py +++ b/tests/goldtest.py @@ -44,7 +44,7 @@ def compare( alongside the "/gold/" directory, and an assertion will be raised. """ - __tracebackhide__ = True # pytest, please don't show me this function. + __tracebackhide__ = True # pytest, please don't show me this function. assert os_sep("/gold/") in expected_dir assert os.path.exists(actual_dir) os.makedirs(expected_dir, exist_ok=True) @@ -85,7 +85,7 @@ def save_mismatch(f: str) -> None: expected = scrub(expected, scrubs) actual = scrub(actual, scrubs) if expected != actual: - text_diff.append(f'{expected_file} != {actual_file}') + text_diff.append(f"{expected_file} != {actual_file}") expected_lines = expected.splitlines() actual_lines = actual.splitlines() print(f":::: diff '{expected_file}' and '{actual_file}'") @@ -113,7 +113,7 @@ def contains(filename: str, *strlist: str) -> None: missing in `filename`. 
""" - __tracebackhide__ = True # pytest, please don't show me this function. + __tracebackhide__ = True # pytest, please don't show me this function. with open(filename, encoding="utf-8") as fobj: text = fobj.read() for s in strlist: @@ -127,13 +127,11 @@ def contains_rx(filename: str, *rxlist: str) -> None: any lines in `filename`. """ - __tracebackhide__ = True # pytest, please don't show me this function. + __tracebackhide__ = True # pytest, please don't show me this function. with open(filename, encoding="utf-8") as fobj: lines = fobj.readlines() for rx in rxlist: - assert any(re.search(rx, line) for line in lines), ( - f"Missing regex in {filename}: r{rx!r}" - ) + assert any(re.search(rx, line) for line in lines), f"Missing regex in {filename}: r{rx!r}" def contains_any(filename: str, *strlist: str) -> None: @@ -143,7 +141,7 @@ def contains_any(filename: str, *strlist: str) -> None: `filename`. """ - __tracebackhide__ = True # pytest, please don't show me this function. + __tracebackhide__ = True # pytest, please don't show me this function. with open(filename, encoding="utf-8") as fobj: text = fobj.read() for s in strlist: @@ -160,7 +158,7 @@ def doesnt_contain(filename: str, *strlist: str) -> None: `filename`. """ - __tracebackhide__ = True # pytest, please don't show me this function. + __tracebackhide__ = True # pytest, please don't show me this function. with open(filename, encoding="utf-8") as fobj: text = fobj.read() for s in strlist: @@ -169,6 +167,7 @@ def doesnt_contain(filename: str, *strlist: str) -> None: # Helpers + def canonicalize_xml(xtext: str) -> str: """Canonicalize some XML text.""" root = xml.etree.ElementTree.fromstring(xtext) diff --git a/tests/helpers.py b/tests/helpers.py index 31831c4e9..29291a10d 100644 --- a/tests/helpers.py +++ b/tests/helpers.py @@ -51,7 +51,7 @@ def subprocess_popen(cmd: str) -> subprocess.Popen[bytes]: # Subprocesses are expensive, but convenient, and so may be over-used in # the test suite. Use these lines to get a list of the tests using them: if 0: # pragma: debugging - pth = "/tmp/processes.txt" # type: ignore[unreachable] + pth = "/tmp/processes.txt" # type: ignore[unreachable] with open(pth, "a", encoding="utf-8") as proctxt: print(os.getenv("PYTEST_CURRENT_TEST", "unknown"), file=proctxt, flush=True) @@ -90,6 +90,7 @@ def run_command(cmd: str) -> tuple[int, str]: # $set_env.py: COVERAGE_DIS - Disassemble test code to /tmp/dis SHOW_DIS = bool(int(os.getenv("COVERAGE_DIS", "0"))) + def make_file( filename: str, text: str = "", @@ -126,10 +127,10 @@ def make_file( os.makedirs(dirs, exist_ok=True) # Create the file. 
- with open(filename, 'wb') as f: + with open(filename, "wb") as f: f.write(data) - if text and basename.endswith(".py") and SHOW_DIS: # pragma: debugging + if text and basename.endswith(".py") and SHOW_DIS: # pragma: debugging os.makedirs("/tmp/dis", exist_ok=True) with open(f"/tmp/dis/{basename}.dis", "w", encoding="utf-8") as fdis: print(f"# {os.path.abspath(filename)}", file=fdis) @@ -237,9 +238,9 @@ def remove_tree(dirname: str) -> None: # Map chars to numbers for arcz_to_arcs -_arcz_map = {'.': -1} -_arcz_map.update({c: ord(c) - ord('0') for c in '123456789'}) -_arcz_map.update({c: 10 + ord(c) - ord('A') for c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'}) +_arcz_map = {".": -1} +_arcz_map.update({c: ord(c) - ord("0") for c in "123456789"}) +_arcz_map.update({c: 10 + ord(c) - ord("A") for c in "ABCDEFGHIJKLMNOPQRSTUVWXYZ"}) def arcz_to_arcs(arcz: str) -> list[TArc]: @@ -264,15 +265,15 @@ def arcz_to_arcs(arcz: str) -> list[TArc]: for pair in arcz.split(): asgn = bsgn = 1 if len(pair) == 2: - a, b = pair # type: ignore[misc] + a, b = pair # type: ignore[misc] else: assert len(pair) == 3 if pair[0] == "-": - _, a, b = pair # type: ignore[misc] + _, a, b = pair # type: ignore[misc] asgn = -1 else: assert pair[1] == "-" - a, _, b = pair # type: ignore[misc] + a, _, b = pair # type: ignore[misc] bsgn = -1 arcs.append((asgn * _arcz_map[a], bsgn * _arcz_map[b])) return sorted(arcs) @@ -293,8 +294,10 @@ def change_dir(new_dir: str | Path) -> Iterator[None]: finally: os.chdir(old_dir) + T = TypeVar("T") + def assert_count_equal( a: Iterable[T] | None, b: Iterable[T] | None, @@ -320,7 +323,7 @@ def assert_coverage_warnings( Each msg can be a string compared for equality, or a compiled regex used to search the text. """ - assert msgs # don't call this without some messages. + assert msgs # don't call this without some messages. warns = [w for w in warns if issubclass(w.category, CoverageWarning)] actuals = [cast(Warning, w.message).args[0] for w in warns] assert len(msgs) == len(actuals) @@ -347,6 +350,7 @@ def swallow_warnings( class FailingProxy: """A proxy for another object, but one method will fail a few times before working.""" + def __init__(self, obj: Any, methname: str, fails: list[Exception]) -> None: """Create the failing proxy. @@ -369,13 +373,16 @@ def __getattr__(self, name: str) -> Any: def _make_failing_method(self, exc: Exception) -> Callable[..., NoReturn]: """Return a function that will raise `exc`.""" + def _meth(*args: Any, **kwargs: Any) -> NoReturn: raise exc + return _meth class DebugControlString(DebugControl): """A `DebugControl` that writes to a StringIO, for testing.""" + def __init__(self, options: Iterable[str]) -> None: self.io = io.StringIO() super().__init__(options, self.io) diff --git a/tests/mixins.py b/tests/mixins.py index 0e615cd71..23d7b399f 100644 --- a/tests/mixins.py +++ b/tests/mixins.py @@ -132,6 +132,7 @@ class StdStreamCapturingMixin: invocation will only return the delta. 
""" + @pytest.fixture(autouse=True) def _capcapsys(self, capsys: pytest.CaptureFixture[str]) -> None: """Grab the fixture so our methods can use it.""" diff --git a/tests/modules/pkg1/__main__.py b/tests/modules/pkg1/__main__.py index 0d38e33e9..dbc2a657a 100644 --- a/tests/modules/pkg1/__main__.py +++ b/tests/modules/pkg1/__main__.py @@ -1,3 +1,4 @@ # Used in the tests for PyRunner import sys + print("pkg1.__main__: passed %s" % sys.argv[1]) diff --git a/tests/modules/pkg1/runmod2.py b/tests/modules/pkg1/runmod2.py index e2778669f..00aeb58e3 100644 --- a/tests/modules/pkg1/runmod2.py +++ b/tests/modules/pkg1/runmod2.py @@ -3,4 +3,5 @@ # Used in the tests for PyRunner import sys + print("runmod2: passed %s" % sys.argv[1]) diff --git a/tests/modules/pkg1/sub/__main__.py b/tests/modules/pkg1/sub/__main__.py index 1af82c43f..c44a22e79 100644 --- a/tests/modules/pkg1/sub/__main__.py +++ b/tests/modules/pkg1/sub/__main__.py @@ -1,3 +1,4 @@ # Used in the tests for PyRunner import sys + print("pkg1.sub.__main__: passed %s" % sys.argv[1]) diff --git a/tests/modules/pkg1/sub/runmod3.py b/tests/modules/pkg1/sub/runmod3.py index d2be9e5c4..65ecd864f 100644 --- a/tests/modules/pkg1/sub/runmod3.py +++ b/tests/modules/pkg1/sub/runmod3.py @@ -3,4 +3,5 @@ # Used in the tests for PyRunner import sys + print("runmod3: passed %s" % sys.argv[1]) diff --git a/tests/modules/plugins/a_plugin.py b/tests/modules/plugins/a_plugin.py index 2122e869f..61c158154 100644 --- a/tests/modules/plugins/a_plugin.py +++ b/tests/modules/plugins/a_plugin.py @@ -14,6 +14,6 @@ class Plugin(CoveragePlugin): def coverage_init( reg: Plugins, - options: Any, # pylint: disable=unused-argument + options: Any, # pylint: disable=unused-argument ) -> None: reg.add_file_tracer(Plugin()) diff --git a/tests/modules/plugins/another.py b/tests/modules/plugins/another.py index a61459031..c9b50e84a 100644 --- a/tests/modules/plugins/another.py +++ b/tests/modules/plugins/another.py @@ -10,12 +10,13 @@ from coverage import CoveragePlugin from coverage.plugin_support import Plugins + class Plugin(CoveragePlugin): pass def coverage_init( reg: Plugins, - options: Any, # pylint: disable=unused-argument + options: Any, # pylint: disable=unused-argument ) -> None: reg.add_file_tracer(Plugin()) diff --git a/tests/modules/process_test/try_execfile.py b/tests/modules/process_test/try_execfile.py index 7b4176215..980162a90 100644 --- a/tests/modules/process_test/try_execfile.py +++ b/tests/modules/process_test/try_execfile.py @@ -33,6 +33,7 @@ # removes duplicate entries from sys.path. So we do that too, since the extra # entries don't affect the running of the program. 
+ def same_file(p1: str, p2: str) -> bool: """Determine if `p1` and `p2` refer to the same existing file.""" if not p1: @@ -48,6 +49,7 @@ def same_file(p1: str, p2: str) -> bool: norm2 = os.path.normcase(os.path.normpath(p2)) return norm1 == norm2 + def without_same_files(filenames: List[str]) -> List[str]: """Return the list `filenames` with duplicates (by same_file) removed.""" reduced: List[str] = [] @@ -56,24 +58,28 @@ def without_same_files(filenames: List[str]) -> List[str]: reduced.append(filename) return reduced + cleaned_sys_path = [os.path.normcase(p) for p in without_same_files(sys.path)] DATA = "xyzzy" import __main__ + def my_function(a: Any) -> str: """A function to force execution of module-level values.""" return f"my_fn({a!r})" + FN_VAL = my_function("fooey") -loader = globals().get('__loader__') -spec = globals().get('__spec__') +loader = globals().get("__loader__") +spec = globals().get("__spec__") # A more compact ad-hoc grouped-by-first-letter list of builtins. CLUMPS = "ABC,DEF,GHI,JKLMN,OPQR,ST,U,VWXYZ_,ab,cd,efg,hij,lmno,pqr,stuvwxyz".split(",") + def word_group(w: str) -> int: """Figure out which CLUMP the first letter of w is in.""" for i, clump in enumerate(CLUMPS): @@ -81,35 +87,41 @@ def word_group(w: str) -> int: return i return 99 + builtin_dir = [" ".join(s) for _, s in itertools.groupby(dir(__builtins__), key=word_group)] globals_to_check = { - 'os.getcwd': os.getcwd(), - '__name__': __name__, - '__file__': os.path.normcase(__file__), - '__doc__': __doc__, - '__builtins__.has_open': hasattr(__builtins__, 'open'), - '__builtins__.dir': builtin_dir, - '__loader__ exists': loader is not None, - '__package__': __package__, - '__spec__ exists': spec is not None, - 'DATA': DATA, - 'FN_VAL': FN_VAL, - '__main__.DATA': getattr(__main__, "DATA", "nothing"), - 'argv0': sys.argv[0], - 'argv1-n': sys.argv[1:], - 'path': cleaned_sys_path, + "os.getcwd": os.getcwd(), + "__name__": __name__, + "__file__": os.path.normcase(__file__), + "__doc__": __doc__, + "__builtins__.has_open": hasattr(__builtins__, "open"), + "__builtins__.dir": builtin_dir, + "__loader__ exists": loader is not None, + "__package__": __package__, + "__spec__ exists": spec is not None, + "DATA": DATA, + "FN_VAL": FN_VAL, + "__main__.DATA": getattr(__main__, "DATA", "nothing"), + "argv0": sys.argv[0], + "argv1-n": sys.argv[1:], + "path": cleaned_sys_path, } if loader is not None: - globals_to_check.update({ - '__loader__.fullname': getattr(loader, 'fullname', None) or getattr(loader, 'name', None), - }) + globals_to_check.update( + { + "__loader__.fullname": getattr(loader, "fullname", None) + or getattr(loader, "name", None), + } + ) if spec is not None: - globals_to_check.update({ - '__spec__.' + aname: getattr(spec, aname) - for aname in ['name', 'origin', 'submodule_search_locations', 'parent', 'has_location'] - }) + globals_to_check.update( + { + "__spec__." 
+ aname: getattr(spec, aname) + for aname in ["name", "origin", "submodule_search_locations", "parent", "has_location"] + } + ) print(json.dumps(globals_to_check, indent=4, sort_keys=True)) diff --git a/tests/modules/runmod1.py b/tests/modules/runmod1.py index f79fae83f..11f4d1f3a 100644 --- a/tests/modules/runmod1.py +++ b/tests/modules/runmod1.py @@ -3,4 +3,5 @@ # Used in the tests for PyRunner import sys + print("runmod1: passed %s" % sys.argv[1]) diff --git a/tests/osinfo.py b/tests/osinfo.py index e90d5dcf2..cb540fb22 100644 --- a/tests/osinfo.py +++ b/tests/osinfo.py @@ -14,22 +14,24 @@ def process_ram() -> int: """How much RAM is this process using? (Windows)""" import ctypes from ctypes import wintypes + # From: http://lists.ubuntu.com/archives/bazaar-commits/2009-February/011990.html # Updated from: https://stackoverflow.com/a/16204942/14343 class PROCESS_MEMORY_COUNTERS_EX(ctypes.Structure): """Used by GetProcessMemoryInfo""" + _fields_ = [ - ('cb', wintypes.DWORD), - ('PageFaultCount', wintypes.DWORD), - ('PeakWorkingSetSize', ctypes.c_size_t), - ('WorkingSetSize', ctypes.c_size_t), - ('QuotaPeakPagedPoolUsage', ctypes.c_size_t), - ('QuotaPagedPoolUsage', ctypes.c_size_t), - ('QuotaPeakNonPagedPoolUsage', ctypes.c_size_t), - ('QuotaNonPagedPoolUsage', ctypes.c_size_t), - ('PagefileUsage', ctypes.c_size_t), - ('PeakPagefileUsage', ctypes.c_size_t), - ('PrivateUsage', ctypes.c_size_t), + ("cb", wintypes.DWORD), + ("PageFaultCount", wintypes.DWORD), + ("PeakWorkingSetSize", ctypes.c_size_t), + ("WorkingSetSize", ctypes.c_size_t), + ("QuotaPeakPagedPoolUsage", ctypes.c_size_t), + ("QuotaPagedPoolUsage", ctypes.c_size_t), + ("QuotaPeakNonPagedPoolUsage", ctypes.c_size_t), + ("QuotaNonPagedPoolUsage", ctypes.c_size_t), + ("PagefileUsage", ctypes.c_size_t), + ("PeakPagefileUsage", ctypes.c_size_t), + ("PrivateUsage", ctypes.c_size_t), ] GetProcessMemoryInfo = ctypes.windll.psapi.GetProcessMemoryInfo @@ -50,15 +52,15 @@ class PROCESS_MEMORY_COUNTERS_EX(ctypes.Structure): ctypes.byref(counters), ctypes.sizeof(counters), ) - if not ret: # pragma: part covered - return 0 # pragma: cant happen + if not ret: # pragma: part covered + return 0 # pragma: cant happen return counters.PrivateUsage elif sys.platform.startswith("linux"): # Linux implementation import os - _scale = {'kb': 1024, 'mb': 1024*1024} + _scale = {"kb": 1024, "mb": 1024 * 1024} def _VmB(key: str) -> int: """Read the /proc/PID/status file to find memory use.""" @@ -66,23 +68,24 @@ def _VmB(key: str) -> int: # Get pseudo file /proc//status with open(f"/proc/{os.getpid()}/status", encoding="utf-8") as t: v = t.read() - except OSError: # pragma: cant happen - return 0 # non-Linux? + except OSError: # pragma: cant happen + return 0 # non-Linux? # Get VmKey line e.g. 'VmRSS: 9999 kB\n ...' i = v.index(key) vp = v[i:].split(None, 3) - if len(vp) < 3: # pragma: part covered - return 0 # pragma: cant happen + if len(vp) < 3: # pragma: part covered + return 0 # pragma: cant happen # Convert Vm value to bytes. return int(float(vp[1]) * _scale[vp[2].lower()]) def process_ram() -> int: """How much RAM is this process using? (Linux implementation)""" - return _VmB('VmRSS') + return _VmB("VmRSS") else: # Generic implementation. def process_ram() -> int: """How much RAM is this process using? 
(stdlib implementation)""" import resource + return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss diff --git a/tests/plugin1.py b/tests/plugin1.py index 6d0b27f41..b2116e841 100644 --- a/tests/plugin1.py +++ b/tests/plugin1.py @@ -14,6 +14,7 @@ from coverage.plugin_support import Plugins from coverage.types import TLineNo + class Plugin(CoveragePlugin): """A file tracer plugin to import, so that it isn't in the test's current directory.""" @@ -44,18 +45,19 @@ def source_filename(self) -> str: def line_number_range(self, frame: FrameType) -> tuple[TLineNo, TLineNo]: """Map the line number X to X05,X06,X07.""" lineno = frame.f_lineno - return lineno*100+5, lineno*100+7 + return lineno * 100 + 5, lineno * 100 + 7 class MyFileReporter(FileReporter): """Dead-simple FileReporter.""" + def lines(self) -> set[TLineNo]: return {105, 106, 107, 205, 206, 207} def coverage_init( reg: Plugins, - options: Any, # pylint: disable=unused-argument + options: Any, # pylint: disable=unused-argument ) -> None: """Called by coverage to initialize the plugins here.""" reg.add_file_tracer(Plugin()) diff --git a/tests/plugin2.py b/tests/plugin2.py index 07cce1c9f..b9b408735 100644 --- a/tests/plugin2.py +++ b/tests/plugin2.py @@ -15,7 +15,7 @@ from coverage.types import TLineNo try: - import third.render # pylint: disable=unused-import + import third.render # pylint: disable=unused-import except ImportError: # This plugin is used in a few tests. One of them has the third.render # module, but most don't. We need to import it but not use it, so just @@ -25,6 +25,7 @@ class Plugin(CoveragePlugin): """A file tracer plugin for testing.""" + def file_tracer(self, filename: str) -> FileTracer | None: if "render.py" in filename: return RenderFileTracer() @@ -47,26 +48,27 @@ def dynamic_source_filename( ) -> str | None: if frame.f_code.co_name != "render": return None - source_filename: str = os.path.abspath(frame.f_locals['filename']) + source_filename: str = os.path.abspath(frame.f_locals["filename"]) return source_filename def line_number_range(self, frame: FrameType) -> tuple[TLineNo, TLineNo]: - lineno = frame.f_locals['linenum'] - return lineno, lineno+1 + lineno = frame.f_locals["linenum"] + return lineno, lineno + 1 class MyFileReporter(FileReporter): """A goofy file reporter.""" + def lines(self) -> set[TLineNo]: # Goofy test arrangement: claim that the file has as many lines as the # number in its name. 
num = os.path.basename(self.filename).split(".")[0].split("_")[1] - return set(range(1, int(num)+1)) + return set(range(1, int(num) + 1)) def coverage_init( reg: Plugins, - options: Any, # pylint: disable=unused-argument + options: Any, # pylint: disable=unused-argument ) -> None: """Called by coverage to initialize the plugins here.""" reg.add_file_tracer(Plugin()) diff --git a/tests/plugin_config.py b/tests/plugin_config.py index a32f485d9..dc0ae8a1d 100644 --- a/tests/plugin_config.py +++ b/tests/plugin_config.py @@ -14,6 +14,7 @@ class Plugin(coverage.CoveragePlugin): """A configuring plugin for testing.""" + def configure(self, config: TConfigurable) -> None: """Configure all the things!""" opt_name = "report:exclude_lines" @@ -25,7 +26,7 @@ def configure(self, config: TConfigurable) -> None: def coverage_init( reg: Plugins, - options: Any, # pylint: disable=unused-argument + options: Any, # pylint: disable=unused-argument ) -> None: """Called by coverage to initialize the plugins here.""" reg.add_configurer(Plugin()) diff --git a/tests/select_plugin.py b/tests/select_plugin.py index 4239608fc..fef0e1b3c 100644 --- a/tests/select_plugin.py +++ b/tests/select_plugin.py @@ -25,13 +25,11 @@ def pytest_addoption(parser): ) -def pytest_collection_modifyitems(config, items): # pragma: debugging +def pytest_collection_modifyitems(config, items): # pragma: debugging """Run an external command to get a list of tests to run.""" select_cmd = config.getoption("--select-cmd") if select_cmd: output = subprocess.check_output(select_cmd, shell="True").decode("utf-8") - test_nodeids = { - nodeid: seq for seq, nodeid in enumerate(output.splitlines()) - } + test_nodeids = {nodeid: seq for seq, nodeid in enumerate(output.splitlines())} new_items = [item for item in items if item.nodeid in test_nodeids] items[:] = sorted(new_items, key=lambda item: test_nodeids[item.nodeid]) diff --git a/tests/test_annotate.py b/tests/test_annotate.py index 3819d6878..639fecbca 100644 --- a/tests/test_annotate.py +++ b/tests/test_annotate.py @@ -16,27 +16,36 @@ class AnnotationGoldTest(CoverageTest): def make_multi(self) -> None: """Make a few source files we need for the tests.""" - self.make_file("multi.py", """\ + self.make_file( + "multi.py", + """\ import a.a import b.b a.a.a(1) b.b.b(2) - """) + """, + ) self.make_file("a/__init__.py") - self.make_file("a/a.py", """\ + self.make_file( + "a/a.py", + """\ def a(x): if x == 1: print("x is 1") else: print("x is not 1") - """) + """, + ) self.make_file("b/__init__.py") - self.make_file("b/b.py", """\ + self.make_file( + "b/b.py", + """\ def b(x): msg = f"x is {x}" print(msg) - """) + """, + ) def test_multi(self) -> None: self.make_multi() @@ -53,19 +62,24 @@ def test_annotate_dir(self) -> None: compare(gold_path("annotate/anno_dir"), "out_anno_dir", "*,cover") def test_encoding(self) -> None: - self.make_file("utf8.py", """\ + self.make_file( + "utf8.py", + """\ # -*- coding: utf-8 -*- # This comment has an accent: Ê print("spam eggs") - """) + """, + ) cov = coverage.Coverage() self.start_import_stop(cov, "utf8") cov.annotate() compare(gold_path("annotate/encodings"), ".", "*,cover") def test_white(self) -> None: - self.make_file("white.py", """\ + self.make_file( + "white.py", + """\ # A test case sent to me by Steve White def f(self): @@ -99,7 +113,8 @@ def h(x): a = 2 h(2) - """) + """, + ) cov = coverage.Coverage() self.start_import_stop(cov, "white") @@ -107,7 +122,9 @@ def h(x): compare(gold_path("annotate/white"), ".", "*,cover") def 
test_missing_after_else(self) -> None: - self.make_file("mae.py", """\ + self.make_file( + "mae.py", + """\ def f(x): if x == 1: print("1") @@ -118,7 +135,8 @@ def f(x): print("nope") if f(2): print("nope") - """) + """, + ) cov = coverage.Coverage() self.start_import_stop(cov, "mae") diff --git a/tests/test_api.py b/tests/test_api.py index 6aaa4178b..9c6816158 100644 --- a/tests/test_api.py +++ b/tests/test_api.py @@ -35,6 +35,7 @@ BAD_SQLITE_REGEX = r"file( is encrypted or)? is not a database" + class ApiTest(CoverageTest): """Api-oriented tests for coverage.py.""" @@ -58,17 +59,23 @@ def assertFiles(self, files: list[str]) -> None: def test_unexecuted_file(self) -> None: cov = coverage.Coverage() - self.make_file("mycode.py", """\ + self.make_file( + "mycode.py", + """\ a = 1 b = 2 if b == 3: c = 4 d = 5 - """) + """, + ) - self.make_file("not_run.py", """\ + self.make_file( + "not_run.py", + """\ fooey = 17 - """) + """, + ) # Import the Python file, executing it. self.start_import_stop(cov, "mycode") @@ -78,14 +85,20 @@ def test_unexecuted_file(self) -> None: assert missing == [1] def test_filenames(self) -> None: - self.make_file("mymain.py", """\ + self.make_file( + "mymain.py", + """\ import mymod a = 1 - """) + """, + ) - self.make_file("mymod.py", """\ + self.make_file( + "mymod.py", + """\ fooey = 17 - """) + """, + ) # Import the Python file, executing it. cov = coverage.Coverage() @@ -118,11 +131,14 @@ def test_filenames(self) -> None: @pytest.mark.parametrize("cover_pylib", [False, True]) def test_stdlib(self, cover_pylib: bool) -> None: - self.make_file("mymain.py", """\ + self.make_file( + "mymain.py", + """\ import colorsys a = 1 hls = colorsys.rgb_to_hls(1.0, 0.5, 0.0) - """) + """, + ) # Measure without the stdlib. cov1 = coverage.Coverage(cover_pylib=cover_pylib) @@ -139,12 +155,15 @@ def test_stdlib(self, cover_pylib: bool) -> None: assert statements == missing def test_include_can_measure_stdlib(self) -> None: - self.make_file("mymain.py", """\ + self.make_file( + "mymain.py", + """\ import colorsys, random a = 1 r, g, b = [random.random() for _ in range(3)] hls = colorsys.rgb_to_hls(r, g, b) - """) + """, + ) # Measure without the stdlib, but include colorsys. 
cov1 = coverage.Coverage(cover_pylib=False, include=["*/colorsys.py"]) @@ -165,48 +184,51 @@ def test_exclude_list(self) -> None: assert cov.get_exclude_list() == ["foo"] cov.exclude("bar") assert cov.get_exclude_list() == ["foo", "bar"] - assert cov._exclude_regex('exclude') == "(?:foo)|(?:bar)" + assert cov._exclude_regex("exclude") == "(?:foo)|(?:bar)" cov.clear_exclude() assert cov.get_exclude_list() == [] def test_exclude_partial_list(self) -> None: cov = coverage.Coverage() - cov.clear_exclude(which='partial') - assert cov.get_exclude_list(which='partial') == [] - cov.exclude("foo", which='partial') - assert cov.get_exclude_list(which='partial') == ["foo"] - cov.exclude("bar", which='partial') - assert cov.get_exclude_list(which='partial') == ["foo", "bar"] - assert cov._exclude_regex(which='partial') == "(?:foo)|(?:bar)" - cov.clear_exclude(which='partial') - assert cov.get_exclude_list(which='partial') == [] + cov.clear_exclude(which="partial") + assert cov.get_exclude_list(which="partial") == [] + cov.exclude("foo", which="partial") + assert cov.get_exclude_list(which="partial") == ["foo"] + cov.exclude("bar", which="partial") + assert cov.get_exclude_list(which="partial") == ["foo", "bar"] + assert cov._exclude_regex(which="partial") == "(?:foo)|(?:bar)" + cov.clear_exclude(which="partial") + assert cov.get_exclude_list(which="partial") == [] def test_exclude_and_partial_are_separate_lists(self) -> None: cov = coverage.Coverage() - cov.clear_exclude(which='partial') - cov.clear_exclude(which='exclude') - cov.exclude("foo", which='partial') - assert cov.get_exclude_list(which='partial') == ['foo'] - assert cov.get_exclude_list(which='exclude') == [] - cov.exclude("bar", which='exclude') - assert cov.get_exclude_list(which='partial') == ['foo'] - assert cov.get_exclude_list(which='exclude') == ['bar'] - cov.exclude("p2", which='partial') - cov.exclude("e2", which='exclude') - assert cov.get_exclude_list(which='partial') == ['foo', 'p2'] - assert cov.get_exclude_list(which='exclude') == ['bar', 'e2'] - cov.clear_exclude(which='partial') - assert cov.get_exclude_list(which='partial') == [] - assert cov.get_exclude_list(which='exclude') == ['bar', 'e2'] - cov.clear_exclude(which='exclude') - assert cov.get_exclude_list(which='partial') == [] - assert cov.get_exclude_list(which='exclude') == [] + cov.clear_exclude(which="partial") + cov.clear_exclude(which="exclude") + cov.exclude("foo", which="partial") + assert cov.get_exclude_list(which="partial") == ["foo"] + assert cov.get_exclude_list(which="exclude") == [] + cov.exclude("bar", which="exclude") + assert cov.get_exclude_list(which="partial") == ["foo"] + assert cov.get_exclude_list(which="exclude") == ["bar"] + cov.exclude("p2", which="partial") + cov.exclude("e2", which="exclude") + assert cov.get_exclude_list(which="partial") == ["foo", "p2"] + assert cov.get_exclude_list(which="exclude") == ["bar", "e2"] + cov.clear_exclude(which="partial") + assert cov.get_exclude_list(which="partial") == [] + assert cov.get_exclude_list(which="exclude") == ["bar", "e2"] + cov.clear_exclude(which="exclude") + assert cov.get_exclude_list(which="partial") == [] + assert cov.get_exclude_list(which="exclude") == [] def test_datafile_default(self) -> None: # Default data file behavior: it's .coverage - self.make_file("datatest1.py", """\ + self.make_file( + "datatest1.py", + """\ fooey = 17 - """) + """, + ) self.assertFiles(["datatest1.py"]) cov = coverage.Coverage() @@ -217,9 +239,12 @@ def test_datafile_default(self) -> None: 
@pytest.mark.parametrize("file_class", FilePathClasses) def test_datafile_specified(self, file_class: FilePathType) -> None: # You can specify the data file name. - self.make_file("datatest2.py", """\ + self.make_file( + "datatest2.py", + """\ fooey = 17 - """) + """, + ) self.assertFiles(["datatest2.py"]) cov = coverage.Coverage(data_file=file_class("cov.data")) @@ -230,9 +255,12 @@ def test_datafile_specified(self, file_class: FilePathType) -> None: @pytest.mark.parametrize("file_class", FilePathClasses) def test_datafile_and_suffix_specified(self, file_class: FilePathType) -> None: # You can specify the data file name and suffix. - self.make_file("datatest3.py", """\ + self.make_file( + "datatest3.py", + """\ fooey = 17 - """) + """, + ) self.assertFiles(["datatest3.py"]) cov = coverage.Coverage(data_file=file_class("cov.data"), data_suffix="14") @@ -242,13 +270,19 @@ def test_datafile_and_suffix_specified(self, file_class: FilePathType) -> None: def test_datafile_from_rcfile(self) -> None: # You can specify the data file name in the .coveragerc file - self.make_file("datatest4.py", """\ + self.make_file( + "datatest4.py", + """\ fooey = 17 - """) - self.make_file(".coveragerc", """\ + """, + ) + self.make_file( + ".coveragerc", + """\ [run] data_file = mydata.dat - """) + """, + ) self.assertFiles(["datatest4.py", ".coveragerc"]) cov = coverage.Coverage() @@ -268,8 +302,8 @@ def test_deep_datafile(self) -> None: def test_datafile_none(self) -> None: cov = coverage.Coverage(data_file=None) - def f1() -> None: # pragma: nested - a = 1 # pylint: disable=unused-variable + def f1() -> None: # pragma: nested + a = 1 # pylint: disable=unused-variable one_line_number = f1.__code__.co_firstlineno + 1 lines = [] @@ -317,8 +351,8 @@ def test_completely_zero_reporting(self) -> None: def test_cov4_data_file(self) -> None: cov4_data = ( - "!coverage.py: This is a private format, don't read it directly!" + - '{"lines":{"/somewhere/not/really.py":[1,5,2,3]}}' + "!coverage.py: This is a private format, don't read it directly!" + + '{"lines":{"/somewhere/not/really.py":[1,5,2,3]}}' ) self.make_file(".coverage", cov4_data) cov = coverage.Coverage() @@ -328,13 +362,19 @@ def test_cov4_data_file(self) -> None: def make_code1_code2(self) -> None: """Create the code1.py and code2.py files.""" - self.make_file("code1.py", """\ + self.make_file( + "code1.py", + """\ code1 = 1 - """) - self.make_file("code2.py", """\ + """, + ) + self.make_file( + "code2.py", + """\ code2 = 1 code2 = 2 - """) + """, + ) def check_code1_code2(self, cov: Coverage) -> None: """Check the analysis is correct for code1.py and code2.py.""" @@ -415,9 +455,7 @@ def test_combining_corrupt_data(self) -> None: self.make_good_data_files() self.make_file(".coverage.foo", """La la la, this isn't coverage data!""") cov = coverage.Coverage() - warning_regex = ( - r"Couldn't use data file '.*\.coverage\.foo': " + BAD_SQLITE_REGEX - ) + warning_regex = r"Couldn't use data file '.*\.coverage\.foo': " + BAD_SQLITE_REGEX with self.assert_warnings(cov, [warning_regex]): cov.combine() @@ -479,8 +517,8 @@ def make_files() -> None: self.make_data_file( basename=".coverage.1", lines={ - abs_file('ci/girder/g1.py'): range(10), - abs_file('ci/girder/plugins/p1.py'): range(10), + abs_file("ci/girder/g1.py"): range(10), + abs_file("ci/girder/plugins/p1.py"): range(10), }, ) @@ -495,7 +533,9 @@ def get_combined_filenames() -> set[str]: # Case 1: get the order right. 
make_files() - self.make_file(".coveragerc", """\ + self.make_file( + ".coveragerc", + """\ [paths] plugins = plugins/ @@ -503,12 +543,15 @@ def get_combined_filenames() -> set[str]: girder = girder/ ci/girder/ - """) - assert get_combined_filenames() == {'girder/g1.py', 'plugins/p1.py'} + """, + ) + assert get_combined_filenames() == {"girder/g1.py", "plugins/p1.py"} # Case 2: get the order "wrong". make_files() - self.make_file(".coveragerc", """\ + self.make_file( + ".coveragerc", + """\ [paths] girder = girder/ @@ -516,14 +559,18 @@ def get_combined_filenames() -> set[str]: plugins = plugins/ ci/girder/plugins/ - """) - assert get_combined_filenames() == {'girder/g1.py', 'plugins/p1.py'} + """, + ) + assert get_combined_filenames() == {"girder/g1.py", "plugins/p1.py"} def test_warnings(self) -> None: - self.make_file("hello.py", """\ + self.make_file( + "hello.py", + """\ import sys, os print("Hello") - """) + """, + ) with pytest.warns(Warning) as warns: cov = coverage.Coverage(source=["sys", "xyzzy", "quux"]) self.start_import_stop(cov, "hello") @@ -539,14 +586,20 @@ def test_warnings(self) -> None: ) def test_warnings_suppressed(self) -> None: - self.make_file("hello.py", """\ + self.make_file( + "hello.py", + """\ import sys, os print("Hello") - """) - self.make_file(".coveragerc", """\ + """, + ) + self.make_file( + ".coveragerc", + """\ [run] disable_warnings = no-data-collected, module-not-imported - """) + """, + ) with pytest.warns(Warning) as warns: cov = coverage.Coverage(source=["sys", "xyzzy", "quux"]) self.start_import_stop(cov, "hello") @@ -571,10 +624,13 @@ def test_source_and_include_dont_conflict(self) -> None: # A bad fix made this case fail: https://github.com/nedbat/coveragepy/issues/541 self.make_file("a.py", "import b\na = 1") self.make_file("b.py", "b = 1") - self.make_file(".coveragerc", """\ + self.make_file( + ".coveragerc", + """\ [run] source = . 
- """) + """, + ) # Just like: coverage run a.py cov = coverage.Coverage() @@ -616,7 +672,7 @@ def test_run_debug_sys(self) -> None: cov = coverage.Coverage() with cov.collect(): d = dict(cov.sys_info()) - assert cast(str, d['data_file']).endswith(".coverage") + assert cast(str, d["data_file"]).endswith(".coverage") @pytest.mark.skipif(not testenv.DYN_CONTEXTS, reason="No dynamic contexts with this core.") @@ -625,7 +681,9 @@ class SwitchContextTest(CoverageTest): def make_test_files(self) -> None: """Create a simple file representing a method with two tests.""" - self.make_file("testsuite.py", """\ + self.make_file( + "testsuite.py", + """\ def timestwo(x): return x*2 @@ -634,7 +692,8 @@ def test_multiply_zero(): def test_multiply_six(): assert timestwo(6) == 12 - """) + """, + ) def test_switch_context_testrunner(self) -> None: # This test simulates a coverage-aware test runner, @@ -648,11 +707,11 @@ def test_switch_context_testrunner(self) -> None: suite = import_local_file("testsuite") # Measures test case 1 - cov.switch_context('multiply_zero') + cov.switch_context("multiply_zero") suite.test_multiply_zero() # Measures test case 2 - cov.switch_context('multiply_six') + cov.switch_context("multiply_six") suite.test_multiply_six() # Runner finishes @@ -660,10 +719,10 @@ def test_switch_context_testrunner(self) -> None: # Labeled data is collected data = cov.get_data() - assert ['', 'multiply_six', 'multiply_zero'] == sorted(data.measured_contexts()) + assert ["", "multiply_six", "multiply_zero"] == sorted(data.measured_contexts()) filenames = self.get_measured_filenames(data) - suite_filename = filenames['testsuite.py'] + suite_filename = filenames["testsuite.py"] data.set_query_context("multiply_six") assert [2, 8] == sorted_lines(data, suite_filename) @@ -683,11 +742,11 @@ def test_switch_context_with_static(self) -> None: suite = import_local_file("testsuite") # Measures test case 1 - cov.switch_context('multiply_zero') + cov.switch_context("multiply_zero") suite.test_multiply_zero() # Measures test case 2 - cov.switch_context('multiply_six') + cov.switch_context("multiply_six") suite.test_multiply_six() # Runner finishes @@ -695,11 +754,11 @@ def test_switch_context_with_static(self) -> None: # Labeled data is collected data = cov.get_data() - expected = ['mysuite', 'mysuite|multiply_six', 'mysuite|multiply_zero'] + expected = ["mysuite", "mysuite|multiply_six", "mysuite|multiply_zero"] assert expected == sorted(data.measured_contexts()) filenames = self.get_measured_filenames(data) - suite_filename = filenames['testsuite.py'] + suite_filename = filenames["testsuite.py"] data.set_query_context("mysuite|multiply_six") assert [2, 8] == sorted_lines(data, suite_filename) @@ -779,7 +838,7 @@ def test_explicit_namespace_module(self) -> None: self.start_import_stop(cov, "main") with pytest.raises(CoverageException, match=r"Module .* has no file"): - cov.analysis(sys.modules['namespace_420']) + cov.analysis(sys.modules["namespace_420"]) def test_bug_572(self) -> None: self.make_file("main.py", "import namespace_420\n") @@ -798,7 +857,7 @@ class IncludeOmitTestsMixin(UsingModulesMixin, CoverageTest): # An abstract method for subclasses to define, to appease mypy. def coverage_usepkgs(self, **kwargs_unused: TCovKwargs) -> Iterable[str]: """Run coverage on usepkgs, return a line summary. 
kwargs are for Coverage(**kwargs).""" - raise NotImplementedError() # pragma: not covered + raise NotImplementedError() # pragma: not covered def filenames_in(self, summary: Iterable[str], filenames: str) -> None: """Assert the `filenames` are in the `summary`.""" @@ -904,7 +963,7 @@ def test_source_package_as_package(self) -> None: self.filenames_in(list(lines), "p1a p1b") self.filenames_not_in(list(lines), "p2a p2b othera otherb osa osb") # Because source= was specified, we do search for un-executed files. - assert lines['p1c'] == 0 + assert lines["p1c"] == 0 def test_source_package_as_dir(self) -> None: os.chdir("tests_dir_modules") @@ -913,13 +972,13 @@ def test_source_package_as_dir(self) -> None: self.filenames_in(list(lines), "p1a p1b") self.filenames_not_in(list(lines), "p2a p2b othera otherb osa osb") # Because source= was specified, we do search for un-executed files. - assert lines['p1c'] == 0 + assert lines["p1c"] == 0 def test_source_package_dotted_sub(self) -> None: lines = self.coverage_usepkgs_counts(source=["pkg1.sub"]) self.filenames_not_in(list(lines), "p2a p2b othera otherb osa osb") # Because source= was specified, we do search for un-executed files. - assert lines['runmod3'] == 0 + assert lines["runmod3"] == 0 def test_source_package_dotted_p1b(self) -> None: lines = self.coverage_usepkgs_counts(source=["pkg1.p1b"]) @@ -937,14 +996,14 @@ def test_source_package_part_omitted(self) -> None: lines = self.coverage_usepkgs_counts(source=["pkg1"], omit=["pkg1/p1b.py"]) self.filenames_in(list(lines), "p1a") self.filenames_not_in(list(lines), "p1b") - assert lines['p1c'] == 0 + assert lines["p1c"] == 0 def test_source_package_as_package_part_omitted(self) -> None: # https://github.com/nedbat/coveragepy/issues/638 lines = self.coverage_usepkgs_counts(source=["pkg1"], omit=["*/p1b.py"]) self.filenames_in(list(lines), "p1a") self.filenames_not_in(list(lines), "p1b") - assert lines['p1c'] == 0 + assert lines["p1c"] == 0 def test_ambiguous_source_package_as_dir(self) -> None: # pkg1 is a directory and a pkg, since we cd into tests_dir_modules/ambiguous @@ -961,7 +1020,7 @@ def test_ambiguous_source_package_as_package(self) -> None: self.filenames_in(list(lines), "p1a p1b") self.filenames_not_in(list(lines), "p2a p2b othera otherb osa osb ambiguous") # Because source= was specified, we do search for un-executed files. - assert lines['p1c'] == 0 + assert lines["p1c"] == 0 def test_source_dirs(self) -> None: os.chdir("tests_dir_modules") @@ -970,7 +1029,7 @@ def test_source_dirs(self) -> None: self.filenames_in(list(lines), "p1a p1b") self.filenames_not_in(list(lines), "p2a p2b othera otherb osa osb") # Because source_dirs= was specified, we do search for un-executed files. - assert lines['p1c'] == 0 + assert lines["p1c"] == 0 def test_non_existent_source_dir(self) -> None: with pytest.raises( @@ -1012,10 +1071,13 @@ def coverage_usepkgs(self, **kwargs: TCovKwargs) -> Iterable[str]: class AnalysisTest(CoverageTest): """Test the numerical analysis of results.""" + def test_many_missing_branches(self) -> None: cov = coverage.Coverage(branch=True) - self.make_file("missing.py", """\ + self.make_file( + "missing.py", + """\ def fun1(x): if x == 1: print("one") @@ -1030,7 +1092,8 @@ def fun2(x): print("not x") fun2(3) - """) + """, + ) # Import the Python file, executing it. self.start_import_stop(cov, "missing") @@ -1062,14 +1125,18 @@ class TestRunnerPluginTest(CoverageTest): way they do. 
""" + def pretend_to_be_nose_with_cover(self, erase: bool = False, cd: bool = False) -> None: """This is what the nose --with-cover plugin does.""" - self.make_file("no_biggie.py", """\ + self.make_file( + "no_biggie.py", + """\ a = 1 b = 2 if b == 1: c = 4 - """) + """, + ) self.make_file("sub/hold.txt", "") cov = coverage.Coverage() @@ -1105,19 +1172,25 @@ def test_nose_plugin_with_cd(self) -> None: def pretend_to_be_pytestcov(self, append: bool) -> None: """Act like pytest-cov.""" - self.make_file("prog.py", """\ + self.make_file( + "prog.py", + """\ a = 1 b = 2 if b == 1: c = 4 - """) - self.make_file(".coveragerc", """\ + """, + ) + self.make_file( + ".coveragerc", + """\ [run] parallel = True source = . - """) + """, + ) - cov = coverage.Coverage(source=None, branch=None, config_file='.coveragerc') + cov = coverage.Coverage(source=None, branch=None, config_file=".coveragerc") if append: cov.load() else: @@ -1126,8 +1199,9 @@ def pretend_to_be_pytestcov(self, append: bool) -> None: cov.combine() cov.save() report = io.StringIO() - cov.report(show_missing=None, ignore_errors=True, file=report, skip_covered=None, - skip_empty=None) + cov.report( + show_missing=None, ignore_errors=True, file=report, skip_covered=None, skip_empty=None + ) assert report.getvalue() == textwrap.dedent("""\ Name Stmts Miss Cover ----------------------------- @@ -1182,10 +1256,13 @@ def test_moving_stuff(self) -> None: def test_moving_stuff_with_relative(self) -> None: # When using relative file names, moving the source around is fine. self.make_file("foo.py", "a = 1") - self.make_file(".coveragerc", """\ + self.make_file( + ".coveragerc", + """\ [run] relative_files = true - """) + """, + ) cov = coverage.Coverage(source=["."]) self.start_import_stop(cov, "foo") res = cov.report() @@ -1202,25 +1279,34 @@ def test_moving_stuff_with_relative(self) -> None: assert res == 100 def test_combine_relative(self) -> None: - self.make_file("foo.py", """\ + self.make_file( + "foo.py", + """\ import mod a = 1 - """) + """, + ) self.make_file("lib/mod/__init__.py", "x = 1") - self.make_file(".coveragerc", """\ + self.make_file( + ".coveragerc", + """\ [run] relative_files = true - """) + """, + ) sys.path.append("lib") cov = coverage.Coverage(source=["."], data_suffix=True) self.start_import_stop(cov, "foo") cov.save() self.make_file("dir2/bar.py", "a = 1") - self.make_file("dir2/.coveragerc", """\ + self.make_file( + "dir2/.coveragerc", + """\ [run] relative_files = true - """) + """, + ) with change_dir("dir2"): cov = coverage.Coverage(source=["."], data_suffix=True) self.start_import_stop(cov, "bar") @@ -1231,14 +1317,17 @@ def test_combine_relative(self) -> None: self.make_file("bar.py", "a = 1") self.make_file("modsrc/__init__.py", "x = 1") - self.make_file(".coveragerc", """\ + self.make_file( + ".coveragerc", + """\ [run] relative_files = true [paths] source = modsrc */mod - """) + """, + ) cov = coverage.Coverage() cov.combine() cov.save() @@ -1246,15 +1335,18 @@ def test_combine_relative(self) -> None: cov = coverage.Coverage() cov.load() files = cov.get_data().measured_files() - assert files == {'foo.py', 'bar.py', os_sep('modsrc/__init__.py')} + assert files == {"foo.py", "bar.py", os_sep("modsrc/__init__.py")} res = cov.report() assert res == 100 def test_combine_no_suffix_multiprocessing(self) -> None: - self.make_file(".coveragerc", """\ + self.make_file( + ".coveragerc", + """\ [run] branch = True - """) + """, + ) cov = coverage.Coverage( config_file=".coveragerc", concurrency="multiprocessing", @@ -1272,15 
+1364,23 @@ def test_combine_no_suffix_multiprocessing(self) -> None: def test_files_up_one_level(self) -> None: # https://github.com/nedbat/coveragepy/issues/1280 - self.make_file("src/mycode.py", """\ + self.make_file( + "src/mycode.py", + """\ def foo(): return 17 - """) - self.make_file("test/test_it.py", """\ + """, + ) + self.make_file( + "test/test_it.py", + """\ from src.mycode import foo assert foo() == 17 - """) - self.make_file("test/.coveragerc", """\ + """, + ) + self.make_file( + "test/.coveragerc", + """\ [run] parallel = True relative_files = True @@ -1289,7 +1389,8 @@ def foo(): source = ../src/ */src - """) + """, + ) os.chdir("test") sys.path.insert(0, "..") cov1 = coverage.Coverage() @@ -1314,7 +1415,9 @@ def make_b_or_c_py(self) -> None: # "b_or_c.py b" will run 6 lines. # "b_or_c.py c" will run 7 lines. # Together, they run 8 lines. - self.make_file("b_or_c.py", """\ + self.make_file( + "b_or_c.py", + """\ import sys a = 2 if sys.argv[1] == 'b': @@ -1324,7 +1427,8 @@ def make_b_or_c_py(self) -> None: c2 = 7 d = 8 print('done') - """) + """, + ) def test_combine_parallel_data(self) -> None: self.make_b_or_c_py() @@ -1343,7 +1447,7 @@ def test_combine_parallel_data(self) -> None: # executed. data = coverage.CoverageData() data.read() - assert line_counts(data)['b_or_c.py'] == 8 + assert line_counts(data)["b_or_c.py"] == 8 # Running combine again should fail, because there are no parallel data # files to combine. @@ -1354,7 +1458,7 @@ def test_combine_parallel_data(self) -> None: # And the originally combined data is still there. data = coverage.CoverageData() data.read() - assert line_counts(data)['b_or_c.py'] == 8 + assert line_counts(data)["b_or_c.py"] == 8 def test_combine_parallel_data_with_a_corrupt_file(self) -> None: self.make_b_or_c_py() @@ -1384,7 +1488,7 @@ def test_combine_parallel_data_with_a_corrupt_file(self) -> None: # executed. data = coverage.CoverageData() data.read() - assert line_counts(data)['b_or_c.py'] == 8 + assert line_counts(data)["b_or_c.py"] == 8 def test_combine_no_usable_files(self) -> None: # https://github.com/nedbat/coveragepy/issues/629 @@ -1416,7 +1520,7 @@ def test_combine_no_usable_files(self) -> None: # executed (we only did b, not c). data = coverage.CoverageData() data.read() - assert line_counts(data)['b_or_c.py'] == 6 + assert line_counts(data)["b_or_c.py"] == 6 def test_combine_parallel_data_in_two_steps(self) -> None: self.make_b_or_c_py() @@ -1446,7 +1550,7 @@ def test_combine_parallel_data_in_two_steps(self) -> None: # executed. data = coverage.CoverageData() data.read() - assert line_counts(data)['b_or_c.py'] == 8 + assert line_counts(data)["b_or_c.py"] == 8 def test_combine_parallel_data_no_append(self) -> None: self.make_b_or_c_py() @@ -1473,7 +1577,7 @@ def test_combine_parallel_data_no_append(self) -> None: # because we didn't keep the data from running b. 
data = coverage.CoverageData() data.read() - assert line_counts(data)['b_or_c.py'] == 7 + assert line_counts(data)["b_or_c.py"] == 7 def test_combine_parallel_data_keep(self) -> None: self.make_b_or_c_py() diff --git a/tests/test_arcs.py b/tests/test_arcs.py index 62edc6a73..9109e8664 100644 --- a/tests/test_arcs.py +++ b/tests/test_arcs.py @@ -20,20 +20,23 @@ class SimpleArcTest(CoverageTest): """Tests for coverage.py's arc measurement.""" def test_simple_sequence(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a = 1 b = 2 """, branchz="", ) - self.check_coverage("""\ + self.check_coverage( + """\ a = 1 b = 3 """, branchz="", ) - self.check_coverage("""\ + self.check_coverage( + """\ a = 2 b = 3 @@ -44,7 +47,8 @@ def test_simple_sequence(self) -> None: ) def test_function_def(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ def foo(): a = 2 @@ -54,7 +58,8 @@ def foo(): ) def test_if(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a = 1 if len([]) == 0: a = 3 @@ -63,7 +68,8 @@ def test_if(self) -> None: branchz="23 24", branchz_missing="24", ) - self.check_coverage("""\ + self.check_coverage( + """\ a = 1 if len([]) == 1: a = 3 @@ -74,7 +80,8 @@ def test_if(self) -> None: ) def test_if_else(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ if len([]) == 0: a = 2 else: @@ -84,7 +91,8 @@ def test_if_else(self) -> None: branchz="12 14", branchz_missing="14", ) - self.check_coverage("""\ + self.check_coverage( + """\ if len([]) == 1: a = 2 else: @@ -96,25 +104,30 @@ def test_if_else(self) -> None: ) def test_compact_if(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a = 1 if len([]) == 0: a = 2 assert a == 2 """, - branchz="", branchz_missing="", + branchz="", + branchz_missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ def fn(x): if x % 2: return True return False a = fn(1) assert a is True """, - branchz="2. 23", branchz_missing="23", + branchz="2. 23", + branchz_missing="23", ) def test_multiline(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a = ( 2 + 3 @@ -126,7 +139,8 @@ def test_multiline(self) -> None: ) def test_if_return(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ def if_ret(a): if a: return 3 @@ -140,7 +154,8 @@ def if_ret(a): ) def test_dont_confuse_exit_and_else(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ def foo(): if foo: a = 3 @@ -152,7 +167,8 @@ def foo(): branchz="23 25", branchz_missing="25", ) - self.check_coverage("""\ + self.check_coverage( + """\ def foo(): if foo: a = 3 @@ -165,7 +181,8 @@ def foo(): ) def test_bug_1184(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ def foo(x): if x: try: @@ -183,7 +200,8 @@ def foo(x): def test_bug_1991(self) -> None: # A bytecode was missing a line number, causing a KeyError in sysmon.py. 
- self.check_coverage("""\ + self.check_coverage( + """\ def func(x, y): for size in (x or ()) if y else (): print(size) @@ -203,7 +221,8 @@ def test_bug_576(self) -> None: branchz = "34 38 89 8D" branchz_missing = "38 8D" - self.check_coverage("""\ + self.check_coverage( + """\ foo = True if foo == True: @@ -228,7 +247,8 @@ class WithTest(CoverageTest): """Arc-measuring tests involving context managers.""" def test_with(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ def example(): with open("test", "w", encoding="utf-8") as f: f.write("3") @@ -241,7 +261,8 @@ def example(): ) def test_with_return(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ def example(): with open("test", "w", encoding="utf-8") as f: f.write("3") @@ -255,7 +276,8 @@ def example(): def test_bug_146(self) -> None: # https://github.com/nedbat/coveragepy/issues/146 - self.check_coverage("""\ + self.check_coverage( + """\ for i in range(2): with open("test", "w", encoding="utf-8") as f: print(3) @@ -268,7 +290,8 @@ def test_bug_146(self) -> None: assert self.stdout() == "3\n4\n3\n4\n5\n" def test_nested_with_return(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ def example(x): with open("test", "w", encoding="utf-8") as f2: a = 3 @@ -283,7 +306,8 @@ def example(x): ) def test_break_through_with(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ for i in range(1+1): with open("test", "w", encoding="utf-8") as f: print(3) @@ -295,7 +319,8 @@ def test_break_through_with(self) -> None: ) def test_continue_through_with(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ for i in range(1+1): with open("test", "w", encoding="utf-8") as f: print(3) @@ -308,7 +333,8 @@ def test_continue_through_with(self) -> None: # https://github.com/nedbat/coveragepy/issues/1270 def test_raise_through_with(self) -> None: - cov = self.check_coverage("""\ + cov = self.check_coverage( + """\ from contextlib import nullcontext def f(x): with nullcontext(): @@ -327,7 +353,8 @@ def f(x): assert self.get_missing_arc_description(cov, 3, -2) == expected def test_untaken_if_through_with(self) -> None: - cov = self.check_coverage("""\ + cov = self.check_coverage( + """\ from contextlib import nullcontext def f(x): with nullcontext(): @@ -345,7 +372,8 @@ def f(x): assert self.get_missing_arc_description(cov, 3, -2) == expected def test_untaken_raise_through_with(self) -> None: - cov = self.check_coverage("""\ + cov = self.check_coverage( + """\ from contextlib import nullcontext def f(x): with nullcontext(): @@ -366,7 +394,8 @@ def f(x): assert self.get_missing_arc_description(cov, 3, -2) == expected def test_leaving_module(self) -> None: - cov = self.check_coverage("""\ + cov = self.check_coverage( + """\ print(a := 1) if a == 1: print(3) @@ -379,7 +408,8 @@ def test_leaving_module(self) -> None: assert self.get_missing_arc_description(cov, 2, -1) == expected def test_with_with_lambda(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ from contextlib import nullcontext with nullcontext(lambda x: 2): print(3) @@ -391,7 +421,8 @@ def test_with_with_lambda(self) -> None: def test_multiline_with(self) -> None: # https://github.com/nedbat/coveragepy/issues/1880 - self.check_coverage("""\ + self.check_coverage( + """\ import contextlib, itertools nums = itertools.count() with ( @@ -407,7 +438,8 @@ def test_multiline_with(self) -> None: def test_multi_multiline_with(self) -> None: # https://github.com/nedbat/coveragepy/issues/1880 
- self.check_coverage("""\ + self.check_coverage( + """\ import contextlib, itertools nums = itertools.count() with ( @@ -425,7 +457,8 @@ def test_multi_multiline_with(self) -> None: def test_multi_multiline_with_backslash(self) -> None: # https://github.com/nedbat/coveragepy/issues/1880 - self.check_coverage("""\ + self.check_coverage( + """\ import contextlib, itertools nums = itertools.count() with contextlib.nullcontext() as x, \\ @@ -444,7 +477,8 @@ class LoopArcTest(CoverageTest): """Arc-measuring tests involving loops.""" def test_loop(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ for i in range(10): a = i assert a == 9 @@ -452,7 +486,8 @@ def test_loop(self) -> None: branchz="12 13", branchz_missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ a = -1 for i in range(0): a = i @@ -463,7 +498,8 @@ def test_loop(self) -> None: ) def test_nested_loop(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ for i in range(3): for j in range(3): a = i + j @@ -474,7 +510,8 @@ def test_nested_loop(self) -> None: ) def test_break(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ for i in range(10): a = i break # 3 @@ -486,7 +523,8 @@ def test_break(self) -> None: ) def test_continue(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ for i in range(10): a = i continue # 3 @@ -498,7 +536,8 @@ def test_continue(self) -> None: ) def test_nested_breaks(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ for i in range(3): for j in range(3): a = i + j @@ -515,7 +554,8 @@ def test_if_1(self) -> None: lines = [1, 3, 6] if env.PYBEHAVIOR.keep_constant_test: lines.append(2) - self.check_coverage("""\ + self.check_coverage( + """\ a = 1 if not not 1: a = 3 @@ -530,7 +570,8 @@ def test_if_1(self) -> None: def test_while_1(self) -> None: # With "while 1", the loop knows it's constant. 
- self.check_coverage("""\ + self.check_coverage( + """\ a, i = 1, 0 while 1: if i >= 3: @@ -547,7 +588,8 @@ def test_while_true(self) -> None: lines = [1, 3, 4, 5, 6, 7] if env.PYBEHAVIOR.keep_constant_test: lines.append(2) - self.check_coverage("""\ + self.check_coverage( + """\ a, i = 1, 0 while True: if i >= 3: @@ -565,7 +607,8 @@ def test_while_false(self) -> None: lines = [1, 4] if env.PYBEHAVIOR.keep_constant_test: lines.append(2) - self.check_coverage("""\ + self.check_coverage( + """\ a, i = 1, 0 while False: 1/0 @@ -577,7 +620,8 @@ def test_while_false(self) -> None: ) def test_while_not_false(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a, i = 1, 0 while not False: if i >= 3: @@ -593,14 +637,17 @@ def test_while_not_false(self) -> None: def test_zero_coverage_while_loop(self) -> None: # https://github.com/nedbat/coveragepy/issues/502 self.make_file("main.py", "print('done')") - self.make_file("zero.py", """\ + self.make_file( + "zero.py", + """\ def method(self): while True: return 1 - """) + """, + ) cov = coverage.Coverage(source=["."], branch=True) self.start_import_stop(cov, "main") - assert self.stdout() == 'done\n' + assert self.stdout() == "done\n" if env.PYBEHAVIOR.keep_constant_test: num_stmts = 3 else: @@ -612,7 +659,8 @@ def method(self): def test_bug_496_continue_in_constant_while(self) -> None: # https://github.com/nedbat/coveragepy/issues/496 - self.check_coverage("""\ + self.check_coverage( + """\ up = iter('ta') while True: char = next(up) @@ -626,7 +674,8 @@ def test_bug_496_continue_in_constant_while(self) -> None: ) def test_missing_while_body(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a = 3; b = 0 if 0: while a > 0: @@ -638,7 +687,8 @@ def test_missing_while_body(self) -> None: ) def test_for_if_else_for(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ def branches_2(l): if l: for e in l: @@ -662,7 +712,8 @@ def branches_3(l): ) def test_for_else(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ def forelse(seq): for n in seq: if n > 5: @@ -675,7 +726,8 @@ def forelse(seq): branchz="23 26 34 32", branchz_missing="34", ) - self.check_coverage("""\ + self.check_coverage( + """\ def forelse(seq): for n in seq: if n > 5: @@ -690,7 +742,8 @@ def forelse(seq): ) def test_while_else(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ def whileelse(seq): while seq: n = seq.pop() @@ -704,7 +757,8 @@ def whileelse(seq): branchz="23 27 45 42", branchz_missing="45", ) - self.check_coverage("""\ + self.check_coverage( + """\ def whileelse(seq): while seq: n = seq.pop() @@ -720,7 +774,8 @@ def whileelse(seq): ) def test_confusing_for_loop_bug_175(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ o = [(1,2), (3,4)] o = [a for a in o] for tup in o: @@ -730,7 +785,8 @@ def test_confusing_for_loop_bug_175(self) -> None: branchz="34 3.", branchz_missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ o = [(1,2), (3,4)] for tup in [a for a in o]: x = tup[0] @@ -743,7 +799,8 @@ def test_confusing_for_loop_bug_175(self) -> None: # https://bugs.python.org/issue44672 @pytest.mark.xfail(env.PYVERSION < (3, 10), reason="<3.10 traced final pass incorrectly") def test_incorrect_loop_exit_bug_1175(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ def wrong_loop(x): if x: for i in [3, 33]: @@ -759,7 +816,8 @@ def wrong_loop(x): # https://bugs.python.org/issue44672 def test_incorrect_if_bug_1175(self) -> None: - 
self.check_coverage("""\ + self.check_coverage( + """\ def wrong_loop(x): if x: if x: @@ -775,7 +833,8 @@ def wrong_loop(x): def test_generator_expression(self) -> None: # Generator expression: - self.check_coverage("""\ + self.check_coverage( + """\ o = ((1,2), (3,4)) o = (a for a in o) for tup in o: @@ -789,7 +848,8 @@ def test_generator_expression(self) -> None: def test_generator_expression_another_way(self) -> None: # https://bugs.python.org/issue44450 # Generator expression: - self.check_coverage("""\ + self.check_coverage( + """\ o = ((1,2), (3,4)) o = (a for a in @@ -804,7 +864,8 @@ def test_generator_expression_another_way(self) -> None: def test_other_comprehensions(self) -> None: # Set comprehension: - self.check_coverage("""\ + self.check_coverage( + """\ o = ((1,2), (3,4)) o = {a for a in o} for tup in o: @@ -815,7 +876,8 @@ def test_other_comprehensions(self) -> None: branchz_missing="", ) # Dict comprehension: - self.check_coverage("""\ + self.check_coverage( + """\ o = ((1,2), (3,4)) o = {a:1 for a in o} for tup in o: @@ -828,7 +890,8 @@ def test_other_comprehensions(self) -> None: def test_multiline_dict_comp(self) -> None: # Multiline dict comp: - self.check_coverage("""\ + self.check_coverage( + """\ # comment d = \\ { @@ -841,10 +904,12 @@ def test_multiline_dict_comp(self) -> None: } x = 11 """, - branchz="", branchz_missing="", + branchz="", + branchz_missing="", ) # Multi dict comp: - self.check_coverage("""\ + self.check_coverage( + """\ # comment d = \\ { @@ -861,7 +926,8 @@ def test_multiline_dict_comp(self) -> None: } x = 15 """, - branchz="", branchz_missing="", + branchz="", + branchz_missing="", ) @@ -869,7 +935,8 @@ class ExceptionArcTest(CoverageTest): """Arc-measuring tests involving exception handling.""" def test_try_except(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a, b = 1, 1 try: a = 3 @@ -877,11 +944,13 @@ def test_try_except(self) -> None: b = 5 assert a == 3 and b == 1 """, - branchz="", branchz_missing="", + branchz="", + branchz_missing="", ) def test_raise_followed_by_statement(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a, b = 1, 1 try: a = 3 @@ -891,11 +960,13 @@ def test_raise_followed_by_statement(self) -> None: b = 7 assert a == 3 and b == 7 """, - branchz="", branchz_missing="", + branchz="", + branchz_missing="", ) def test_hidden_raise(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a, b = 1, 1 def oops(x): if x % 2: @@ -913,7 +984,8 @@ def oops(x): ) def test_except_with_type(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a, b = 1, 1 def oops(x): if x % 2: @@ -934,7 +1006,8 @@ def try_it(x): ) def test_try_finally(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a, c = 1, 1 try: a = 3 @@ -944,7 +1017,8 @@ def test_try_finally(self) -> None: """, branchz="", ) - self.check_coverage("""\ + self.check_coverage( + """\ a, c, d = 1, 1, 1 try: try: @@ -957,7 +1031,8 @@ def test_try_finally(self) -> None: """, branchz="", ) - self.check_coverage("""\ + self.check_coverage( + """\ a, c, d = 1, 1, 1 try: try: @@ -974,7 +1049,8 @@ def test_try_finally(self) -> None: ) def test_finally_in_loop(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a, c, d, i = 1, 1, 1, 99 try: for i in range(5): @@ -992,7 +1068,8 @@ def test_finally_in_loop(self) -> None: branchz="34 3D 67 68", branchz_missing="3D", ) - self.check_coverage("""\ + self.check_coverage( + """\ a, c, d, i = 1, 1, 1, 99 try: for i in range(5): 
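The branchz / branchz_missing strings used throughout these tests are the compact pair notation decoded by arcz_to_arcs in tests/helpers.py (reformatted earlier in this patch): each character is a line number, letters continue past 9, "." means -1 (an exit), and a "-" negates the value next to it. A condensed decoding sketch with a couple of worked pairs:

    _arc_map = {".": -1}
    _arc_map.update({c: ord(c) - ord("0") for c in "123456789"})
    _arc_map.update({c: 10 + ord(c) - ord("A") for c in "ABCDEFGHIJKLMNOPQRSTUVWXYZ"})

    def decode_pairs(arcz):
        """Turn pairs like '34 3D' into (from_line, to_line) tuples."""
        arcs = []
        for pair in arcz.split():
            a_sign = b_sign = 1
            if len(pair) == 3 and pair[0] == "-":
                a_sign, pair = -1, pair[1:]
            elif len(pair) == 3 and pair[1] == "-":
                b_sign, pair = -1, pair[0] + pair[2]
            a, b = pair
            arcs.append((a_sign * _arc_map[a], b_sign * _arc_map[b]))
        return sorted(arcs)

    print(decode_pairs("34 3D"))  # [(3, 4), (3, 13)]  -- line 3 can branch to line 4 or 13
    print(decode_pairs("2."))     # [(2, -1)]          -- line 2 can exit the code object
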
@@ -1011,9 +1088,9 @@ def test_finally_in_loop(self) -> None: branchz_missing="67", ) - def test_break_through_finally(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a, c, d, i = 1, 1, 1, 99 try: for i in range(3): @@ -1033,7 +1110,8 @@ def test_break_through_finally(self) -> None: ) def test_break_continue_without_finally(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a, c, d, i = 1, 1, 1, 99 try: for i in range(3): @@ -1053,7 +1131,8 @@ def test_break_continue_without_finally(self) -> None: ) def test_continue_through_finally(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a, b, c, d, i = 1, 1, 1, 1, 99 try: for i in range(3): @@ -1073,7 +1152,8 @@ def test_continue_through_finally(self) -> None: ) def test_finally_in_loop_bug_92(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ for i in range(5): try: j = 3 @@ -1090,7 +1170,8 @@ def test_bug_212(self) -> None: # "except Exception as e" is crucial here. # Bug 212 said that the "if exc" line was incorrectly marked as only # partially covered. - self.check_coverage("""\ + self.check_coverage( + """\ def b(exc): try: while "no peephole".upper(): @@ -1111,7 +1192,8 @@ def b(exc): ) def test_except_finally(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a, b, c = 1, 1, 1 try: a = 3 @@ -1123,7 +1205,8 @@ def test_except_finally(self) -> None: """, branchz="", ) - self.check_coverage("""\ + self.check_coverage( + """\ a, b, c = 1, 1, 1 def oops(x): if x % 2: raise Exception("odd") @@ -1141,7 +1224,8 @@ def oops(x): ) def test_multiple_except_clauses(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a, b, c = 1, 1, 1 try: a = 3 @@ -1155,7 +1239,8 @@ def test_multiple_except_clauses(self) -> None: """, branchz="", ) - self.check_coverage("""\ + self.check_coverage( + """\ a, b, c = 1, 1, 1 try: a = int("xyz") # ValueError @@ -1169,7 +1254,8 @@ def test_multiple_except_clauses(self) -> None: """, branchz="", ) - self.check_coverage("""\ + self.check_coverage( + """\ a, b, c = 1, 1, 1 try: a = [1][3] # IndexError @@ -1183,7 +1269,8 @@ def test_multiple_except_clauses(self) -> None: """, branchz="", ) - self.check_coverage("""\ + self.check_coverage( + """\ a, b, c = 1, 1, 1 try: try: @@ -1202,7 +1289,8 @@ def test_multiple_except_clauses(self) -> None: ) def test_return_finally(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a = [1] def check_token(data): if data: @@ -1221,7 +1309,8 @@ def check_token(data): ) def test_except_jump_finally(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ def func(x): a = f = g = 2 try: @@ -1258,7 +1347,8 @@ def func(x): ) def test_else_jump_finally(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ def func(x): a = f = g = 2 try: @@ -1293,7 +1383,7 @@ def func(x): assert func('other') == (2, 23, 2, 3) # W 32 """, branchz="45 4Q AB AD DE DG GH GJ JK JN", - branchz_missing="" + branchz_missing="", ) @@ -1301,7 +1391,8 @@ class YieldTest(CoverageTest): """Arc tests for generators.""" def test_yield_in_loop(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ def gen(inp): for n in inp: yield n @@ -1313,7 +1404,8 @@ def gen(inp): ) def test_padded_yield_in_loop(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ def gen(inp): i = 2 for n in inp: @@ -1329,7 +1421,8 @@ def gen(inp): ) def test_bug_308(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ def 
run(): for i in range(10): yield lambda: i @@ -1340,7 +1433,8 @@ def run(): branchz="23 2. 56 5.", branchz_missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ def run(): yield lambda: 100 for i in range(10): @@ -1352,7 +1446,8 @@ def run(): branchz="34 3. 67 6.", branchz_missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ def run(): yield lambda: 100 # no branch miss @@ -1367,7 +1462,8 @@ def test_bug_324(self) -> None: # This code is tricky: the list() call pulls all the values from gen(), # but each of them is a generator itself that is never iterated. As a # result, the generator expression on line 3 is never entered or run. - self.check_coverage("""\ + self.check_coverage( + """\ def gen(inp): for n in inp: yield (i * 2 for i in range(n)) @@ -1379,7 +1475,8 @@ def gen(inp): ) def test_coroutines(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ def double_inputs(): while len([1]): # avoid compiler differences x = yield @@ -1398,7 +1495,8 @@ def double_inputs(): assert self.stdout() == "20\n12\n" def test_yield_from(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ def gen(inp): i = 2 for n in inp: @@ -1415,7 +1513,8 @@ def gen(inp): def test_abandoned_yield(self) -> None: # https://github.com/nedbat/coveragepy/issues/440 - self.check_coverage("""\ + self.check_coverage( + """\ def gen(): print(2) yield 3 @@ -1434,8 +1533,10 @@ def gen(): @pytest.mark.skipif(not env.PYBEHAVIOR.match_case, reason="Match-case is new in 3.10") class MatchCaseTest(CoverageTest): """Tests of match-case.""" + def test_match_case_with_default(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ for command in ["huh", "go home", "go n"]: match command.split(): case ["go", direction] if direction in "nesw": @@ -1452,7 +1553,8 @@ def test_match_case_with_default(self) -> None: assert self.stdout() == "default\nno go\ngo: n\n" def test_match_case_with_named_default(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ for command in ["huh", "go home", "go n"]: match command.split(): case ["go", direction] if direction in "nesw": @@ -1469,7 +1571,8 @@ def test_match_case_with_named_default(self) -> None: assert self.stdout() == "default\nno go\ngo: n\n" def test_match_case_with_wildcard(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ for command in ["huh", "go home", "go n"]: match command.split(): case ["go", direction] if direction in "nesw": @@ -1486,7 +1589,8 @@ def test_match_case_with_wildcard(self) -> None: assert self.stdout() == "default: ['huh']\nno go\ngo: n\n" def test_match_case_without_wildcard(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ match = None for command in ["huh", "go home", "go n"]: match command.split(): @@ -1503,7 +1607,8 @@ def test_match_case_without_wildcard(self) -> None: def test_absurd_wildcards(self) -> None: # https://github.com/nedbat/coveragepy/issues/1421 - self.check_coverage("""\ + self.check_coverage( + """\ def absurd(x): match x: case (3 | 99 | (999 | _)): @@ -1515,7 +1620,8 @@ def absurd(x): branchz_missing="", ) assert self.stdout() == "default\n" - self.check_coverage("""\ + self.check_coverage( + """\ def absurd(x): match x: case (3 | 99 | 999 as y): @@ -1526,7 +1632,8 @@ def absurd(x): branchz_missing="34", ) assert self.stdout() == "" - self.check_coverage("""\ + self.check_coverage( + """\ def absurd(x): match x: case (3 | 17 as y): @@ -1539,7 +1646,8 @@ def absurd(x): branchz_missing="34 5-1", ) 
assert self.stdout() == "also not default\n" - self.check_coverage("""\ + self.check_coverage( + """\ def absurd(x): match x: case 3: @@ -1567,7 +1675,8 @@ def test_optimized_away_if_0(self) -> None: branchz = "23 28" branchz_missing = "28" - self.check_coverage("""\ + self.check_coverage( + """\ a = 1 if len([2]): c = 3 @@ -1593,7 +1702,8 @@ def test_optimized_away_if_1(self) -> None: branchz = "23 25 56 59" branchz_missing = "25 59" - self.check_coverage("""\ + self.check_coverage( + """\ a = 1 if len([2]): c = 3 @@ -1614,7 +1724,8 @@ def test_optimized_away_if_1_no_else(self) -> None: lines = [1, 2, 3, 4, 5] else: lines = [1, 3, 4, 5] - self.check_coverage("""\ + self.check_coverage( + """\ a = 1 if 1: b = 3 @@ -1632,7 +1743,8 @@ def test_optimized_if_nested(self) -> None: else: lines = [1, 12, 14, 15] - self.check_coverage("""\ + self.check_coverage( + """\ a = 1 if 0: if 0: @@ -1659,13 +1771,15 @@ def test_dunder_debug(self) -> None: # we expect assert __debug__ # Check that executed code has __debug__ - self.check_coverage("""\ + self.check_coverage( + """\ assert __debug__, "assert __debug__" """, ) # Check that if it didn't have debug, it would let us know. with pytest.raises(AssertionError): - self.check_coverage("""\ + self.check_coverage( + """\ assert not __debug__, "assert not __debug__" """, ) @@ -1675,7 +1789,8 @@ def test_if_debug(self) -> None: branchz = "12 1. 24 26" else: branchz = "12 1. 23 26" - self.check_coverage("""\ + self.check_coverage( + """\ for value in [True, False]: if value: if __debug__: @@ -1694,7 +1809,8 @@ def test_if_not_debug(self) -> None: assert env.PYBEHAVIOR.optimize_if_not_debug == 2 branchz = "23 28 35 37" - self.check_coverage("""\ + self.check_coverage( + """\ lines = set() for value in [True, False]: if value: @@ -1712,7 +1828,8 @@ class MiscArcTest(CoverageTest): """Miscellaneous arc-measuring tests.""" def test_dict_literal(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ d = { 'a': 2, 'b': 3, @@ -1723,9 +1840,11 @@ def test_dict_literal(self) -> None: } assert d """, - branchz="", branchz_missing="", + branchz="", + branchz_missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ d = \\ { 'a': 2, 'b': 3, @@ -1736,11 +1855,13 @@ def test_dict_literal(self) -> None: } assert d """, - branchz="", branchz_missing="", + branchz="", + branchz_missing="", ) def test_unpacked_literals(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ d = { 'a': 2, 'b': 3, @@ -1752,9 +1873,11 @@ def test_unpacked_literals(self) -> None: } assert weird['b'] == 3 """, - branchz="", branchz_missing="", + branchz="", + branchz_missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ l = [ 2, 3, @@ -1766,7 +1889,8 @@ def test_unpacked_literals(self) -> None: ] assert weird[1] == 3 """, - branchz="", branchz_missing="", + branchz="", + branchz_missing="", ) @pytest.mark.parametrize("n", [10, 50, 100, 500, 1000, 2000, 10000]) @@ -1775,17 +1899,23 @@ def test_pathologically_long_code_object(self, n: int) -> None: # Long code objects sometimes cause problems. Originally, it was # due to EXTENDED_ARG bytes codes. Then it showed a mistake in # line-number packing. 
- code = """\ + code = ( + """\ data = [ - """ + "".join(f"""\ + """ + + "".join( + f"""\ [ {i}, {i}, {i}, {i}, {i}, {i}, {i}, {i}, {i}, {i}], - """ for i in range(n) - ) + """\ + """ + for i in range(n) + ) + + """\ ] print(len(data)) """ + ) self.check_coverage(code, branchz="") assert self.stdout() == f"{n}\n" @@ -1794,7 +1924,8 @@ def test_partial_generators(self) -> None: # Line 2 is executed completely. # Line 3 is started but not finished, because zip ends before it finishes. # Line 4 is never started. - self.check_coverage("""\ + self.check_coverage( + """\ def f(a, b): c = (i for i in a) # 2 d = (j for j in b) # 3 @@ -1812,7 +1943,8 @@ class DecoratorArcTest(CoverageTest): """Tests of arcs with decorators.""" def test_function_decorator(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ def decorator(arg): def _dec(f): return f @@ -1829,11 +1961,13 @@ def my_function( a = 14 my_function() """, - branchz="", branchz_missing="", + branchz="", + branchz_missing="", ) def test_class_decorator(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ def decorator(arg): def _dec(c): return c @@ -1849,14 +1983,16 @@ class MyObject( X = 13 a = 14 """, - branchz="", branchz_missing="", + branchz="", + branchz_missing="", ) def test_bug_466a(self) -> None: # A bad interaction between decorators and multi-line list assignments, # believe it or not...! # This example makes more sense when considered in tandem with 466b below. - self.check_coverage("""\ + self.check_coverage( + """\ class Parser(object): @classmethod @@ -1868,13 +2004,15 @@ def parse(cls): Parser.parse() """, - branchz="", branchz_missing="", + branchz="", + branchz_missing="", ) def test_bug_466b(self) -> None: # A bad interaction between decorators and multi-line list assignments, # believe it or not...! 
- self.check_coverage("""\ + self.check_coverage( + """\ class Parser(object): @classmethod @@ -1886,7 +2024,8 @@ def parse(cls): Parser.parse() """, - branchz="", branchz_missing="", + branchz="", + branchz_missing="", ) @@ -1894,7 +2033,8 @@ class LambdaArcTest(CoverageTest): """Tests of lambdas""" def test_multiline_lambda(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ fn = (lambda x: x + 2 ) @@ -1903,7 +2043,8 @@ def test_multiline_lambda(self) -> None: branchz="", branchz_missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ fn = \\ ( @@ -1920,16 +2061,19 @@ def test_multiline_lambda(self) -> None: ) def test_unused_lambdas_are_confusing_bug_90(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a = 1 fn = lambda x: x b = 3 """, - branchz="", branchz_missing="", + branchz="", + branchz_missing="", ) def test_raise_with_lambda_looks_like_partial_branch(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ def ouch(fn): 2/0 a = b = c = d = 3 @@ -1949,7 +2093,8 @@ def ouch(fn): ) def test_lambda_in_dict(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ x = 1 x = 2 d = { @@ -1984,7 +2129,8 @@ class AsyncTest(CoverageTest): @skip_eventlet_670 def test_async(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ import asyncio async def compute(x, y): # 3 @@ -2002,13 +2148,15 @@ async def print_sum(x, y): # 8 loop.run_until_complete(print_sum(1, 2)) loop.close() # G """, - branchz="", branchz_missing="", + branchz="", + branchz_missing="", ) assert self.stdout() == "Compute 1 + 2 ...\n1 + 2 = 3\n" @skip_eventlet_670 def test_async_for(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ import asyncio class AsyncIteratorWrapper: # 3 @@ -2039,7 +2187,8 @@ async def doit(): # G assert self.stdout() == "a\nb\nc\n.\n" def test_async_with(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ async def go(): async with x: pass @@ -2049,7 +2198,8 @@ async def go(): ) def test_async_decorator(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ def wrap(f): # 1 return f @@ -2065,7 +2215,8 @@ async def go(): # https://bugs.python.org/issue44621 @pytest.mark.skipif(env.PYVERSION[:2] == (3, 9), reason="avoid a 3.9 bug: 44621") def test_bug_1158(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ import asyncio async def async_gen(): @@ -2091,7 +2242,8 @@ async def async_test(): # https://bugs.python.org/issue44622 @skip_eventlet_670 def test_bug_1176(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ import asyncio async def async_gen(): @@ -2110,7 +2262,8 @@ async def async_test(): # https://github.com/nedbat/coveragepy/issues/1205 def test_bug_1205(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ def func(): if T(2): if T(3): @@ -2134,7 +2287,8 @@ def func(): reason="CPython fix not backported to 3.9: https://github.com/python/cpython/issues/93061", ) def test_bug_1999(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ import asyncio async def async_range(number): @@ -2160,13 +2314,15 @@ class AnnotationTest(CoverageTest): """Tests using type annotations.""" def test_annotations(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ def f(x:str, y:int) -> str: a:int = 2 return f"{x}, {y}, {a}, 3" print(f("x", 4)) """, - branchz="", branchz_missing="", + branchz="", + branchz_missing="", ) assert self.stdout() == "x, 4, 2, 3\n" 
@@ -2176,7 +2332,8 @@ class ExcludeTest(CoverageTest): def test_default(self) -> None: # A number of forms of pragma comment are accepted. - self.check_coverage("""\ + self.check_coverage( + """\ a = 1 if a: #pragma: no branch b = 3 @@ -2192,23 +2349,25 @@ def test_default(self) -> None: else: h = 14 """, - lines=[1,2,3,4,5,6,7,8,9,10,14], + lines=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 14], missing="", branchz="23 24 56 57 89 8A BC BE", branchz_missing="", ) def test_custom_pragmas(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a = 1 while a: # [only some] c = 3 break assert c == 5-2 """, - lines=[1,2,3,4,5], + lines=[1, 2, 3, 4, 5], partials=["only some"], - branchz="23 25", branchz_missing="", + branchz="23 25", + branchz_missing="", ) @@ -2218,13 +2377,16 @@ class LineDataTest(CoverageTest): def test_branch(self) -> None: cov = coverage.Coverage(branch=True) - self.make_file("fun1.py", """\ + self.make_file( + "fun1.py", + """\ def fun1(x): if x == 1: return fun1(3) - """) + """, + ) self.start_import_stop(cov, "fun1") diff --git a/tests/test_bytecode.py b/tests/test_bytecode.py index 931d4fc1d..aab155e51 100644 --- a/tests/test_bytecode.py +++ b/tests/test_bytecode.py @@ -31,7 +31,7 @@ def h(x): return x+1 """), "", - "exec" + "exec", ) objs = list(code_objects(code)) diff --git a/tests/test_cmdline.py b/tests/test_cmdline.py index 37d9a0688..cacc8ad8d 100644 --- a/tests/test_cmdline.py +++ b/tests/test_cmdline.py @@ -41,36 +41,81 @@ class BaseCmdLineTest(CoverageTest): # uses when calling the function. _defaults = mock.Mock() _defaults.Coverage().annotate( - directory=None, ignore_errors=None, include=None, omit=None, morfs=[], + directory=None, + ignore_errors=None, + include=None, + omit=None, + morfs=[], contexts=None, ) _defaults.Coverage().html_report( - directory=None, ignore_errors=None, include=None, omit=None, morfs=[], - skip_covered=None, show_contexts=None, title=None, contexts=None, - skip_empty=None, precision=None, + directory=None, + ignore_errors=None, + include=None, + omit=None, + morfs=[], + skip_covered=None, + show_contexts=None, + title=None, + contexts=None, + skip_empty=None, + precision=None, ) _defaults.Coverage().report( - ignore_errors=None, include=None, omit=None, morfs=[], - show_missing=None, skip_covered=None, contexts=None, skip_empty=None, - precision=None, sort=None, output_format=None, + ignore_errors=None, + include=None, + omit=None, + morfs=[], + show_missing=None, + skip_covered=None, + contexts=None, + skip_empty=None, + precision=None, + sort=None, + output_format=None, ) _defaults.Coverage().xml_report( - ignore_errors=None, include=None, omit=None, morfs=[], outfile=None, - contexts=None, skip_empty=None, + ignore_errors=None, + include=None, + omit=None, + morfs=[], + outfile=None, + contexts=None, + skip_empty=None, ) _defaults.Coverage().json_report( - ignore_errors=None, include=None, omit=None, morfs=[], outfile=None, - contexts=None, pretty_print=None, show_contexts=None, + ignore_errors=None, + include=None, + omit=None, + morfs=[], + outfile=None, + contexts=None, + pretty_print=None, + show_contexts=None, ) _defaults.Coverage().lcov_report( - ignore_errors=None, include=None, omit=None, morfs=[], outfile=None, + ignore_errors=None, + include=None, + omit=None, + morfs=[], + outfile=None, contexts=None, ) _defaults.Coverage( data_file=DEFAULT_DATAFILE, - cover_pylib=None, data_suffix=None, timid=None, branch=None, - config_file=True, source=None, include=None, omit=None, debug=None, - concurrency=None, 
check_preimported=True, context=None, messages=True, + cover_pylib=None, + data_suffix=None, + timid=None, + branch=None, + config_file=True, + source=None, + include=None, + omit=None, + debug=None, + concurrency=None, + check_preimported=True, + context=None, + messages=True, ) DEFAULT_KWARGS = {name: kw for name, _, kw in _defaults.mock_calls} @@ -96,7 +141,7 @@ def model_object(self) -> mock.Mock: return mk # Global names in cmdline.py that will be mocked during the tests. - MOCK_GLOBALS = ['Coverage', 'PyRunner', 'show_help'] + MOCK_GLOBALS = ["Coverage", "PyRunner", "show_help"] def mock_command_line( self, @@ -117,8 +162,7 @@ def mock_command_line( mk.config.set_option(name, value) patchers = [ - mock.patch("coverage.cmdline."+name, getattr(mk, name)) - for name in self.MOCK_GLOBALS + mock.patch("coverage.cmdline." + name, getattr(mk, name)) for name in self.MOCK_GLOBALS ] for patcher in patchers: patcher.start() @@ -146,7 +190,7 @@ def cmd_executes( expected = self.model_object() globs = {n: getattr(expected, n) for n in self.MOCK_GLOBALS} code_obj = compile(code, "", "exec", dont_inherit=True) - eval(code_obj, globs, {}) # pylint: disable=eval-used + eval(code_obj, globs, {}) # pylint: disable=eval-used # Many of our functions take a lot of arguments, and cmdline.py # calls them with many. But most of them are just the defaults, which @@ -173,7 +217,7 @@ def assert_same_mock_calls(self, m1: mock.Mock, m2: mock.Mock) -> None: if m1.mock_calls != m2.mock_calls: pp1 = pprint.pformat(m1.mock_calls) pp2 = pprint.pformat(m2.mock_calls) - assert pp1+'\n' == pp2+'\n' + assert pp1 + "\n" == pp2 + "\n" def cmd_help( self, @@ -191,13 +235,14 @@ def cmd_help( mk, status = self.mock_command_line(args) assert status == ret, f"Wrong status: got {status}, wanted {ret}" if help_msg: - assert mk.mock_calls[-1] == ('show_help', (help_msg,), {}) + assert mk.mock_calls[-1] == ("show_help", (help_msg,), {}) else: - assert mk.mock_calls[-1] == ('show_help', (), {'topic': topic}) + assert mk.mock_calls[-1] == ("show_help", (), {"topic": topic}) class BaseCmdLineTestTest(BaseCmdLineTest): """Tests that our BaseCmdLineTest helpers work.""" + def test_cmd_executes_same(self) -> None: # All the other tests here use self.cmd_executes_same in successful # ways, so here we just check that it fails. @@ -210,97 +255,145 @@ class CmdLineTest(BaseCmdLineTest): def test_annotate(self) -> None: # coverage annotate [-d DIR] [-i] [--omit DIR,...] [FILE1 FILE2 ...] 
- self.cmd_executes("annotate", """\ + self.cmd_executes( + "annotate", + """\ cov = Coverage() cov.load() cov.annotate() - """) - self.cmd_executes("annotate -d dir1", """\ + """, + ) + self.cmd_executes( + "annotate -d dir1", + """\ cov = Coverage() cov.load() cov.annotate(directory="dir1") - """) - self.cmd_executes("annotate -i", """\ + """, + ) + self.cmd_executes( + "annotate -i", + """\ cov = Coverage() cov.load() cov.annotate(ignore_errors=True) - """) - self.cmd_executes("annotate --omit fooey", """\ + """, + ) + self.cmd_executes( + "annotate --omit fooey", + """\ cov = Coverage(omit=["fooey"]) cov.load() cov.annotate(omit=["fooey"]) - """) - self.cmd_executes("annotate --omit fooey,booey", """\ + """, + ) + self.cmd_executes( + "annotate --omit fooey,booey", + """\ cov = Coverage(omit=["fooey", "booey"]) cov.load() cov.annotate(omit=["fooey", "booey"]) - """) - self.cmd_executes("annotate mod1", """\ + """, + ) + self.cmd_executes( + "annotate mod1", + """\ cov = Coverage() cov.load() cov.annotate(morfs=["mod1"]) - """) - self.cmd_executes("annotate mod1 mod2 mod3", """\ + """, + ) + self.cmd_executes( + "annotate mod1 mod2 mod3", + """\ cov = Coverage() cov.load() cov.annotate(morfs=["mod1", "mod2", "mod3"]) - """) + """, + ) def test_combine(self) -> None: # coverage combine with args - self.cmd_executes("combine datadir1", """\ + self.cmd_executes( + "combine datadir1", + """\ cov = Coverage() cov.combine(["datadir1"], strict=True, keep=False) cov.save() - """) + """, + ) # coverage combine, appending - self.cmd_executes("combine --append datadir1", """\ + self.cmd_executes( + "combine --append datadir1", + """\ cov = Coverage() cov.load() cov.combine(["datadir1"], strict=True, keep=False) cov.save() - """) + """, + ) # coverage combine without args - self.cmd_executes("combine", """\ + self.cmd_executes( + "combine", + """\ cov = Coverage() cov.combine(None, strict=True, keep=False) cov.save() - """) + """, + ) # coverage combine quietly - self.cmd_executes("combine -q", """\ + self.cmd_executes( + "combine -q", + """\ cov = Coverage(messages=False) cov.combine(None, strict=True, keep=False) cov.save() - """) - self.cmd_executes("combine --quiet", """\ + """, + ) + self.cmd_executes( + "combine --quiet", + """\ cov = Coverage(messages=False) cov.combine(None, strict=True, keep=False) cov.save() - """) - self.cmd_executes("combine --data-file=foo.cov", """\ + """, + ) + self.cmd_executes( + "combine --data-file=foo.cov", + """\ cov = Coverage(data_file="foo.cov") cov.combine(None, strict=True, keep=False) cov.save() - """) + """, + ) def test_combine_doesnt_confuse_options_with_args(self) -> None: # https://github.com/nedbat/coveragepy/issues/385 - self.cmd_executes("combine --rcfile cov.ini", """\ + self.cmd_executes( + "combine --rcfile cov.ini", + """\ cov = Coverage(config_file='cov.ini') cov.combine(None, strict=True, keep=False) cov.save() - """) - self.cmd_executes("combine --rcfile cov.ini data1 data2/more", """\ + """, + ) + self.cmd_executes( + "combine --rcfile cov.ini data1 data2/more", + """\ cov = Coverage(config_file='cov.ini') cov.combine(["data1", "data2/more"], strict=True, keep=False) cov.save() - """) + """, + ) - @pytest.mark.parametrize("cmd, output", [ - ("debug", "What information would you like: config, data, sys, premain, pybehave?"), - ("debug foo", "Don't know what you mean by 'foo'"), - ("debug sys config", "Only one topic at a time, please"), - ]) + @pytest.mark.parametrize( + "cmd, output", + [ + ("debug", "What information would you like: 
config, data, sys, premain, pybehave?"), + ("debug foo", "Don't know what you mean by 'foo'"), + ("debug sys config", "Only one topic at a time, please"), + ], + ) def test_debug(self, cmd: str, output: str) -> None: self.cmd_help(cmd, output) @@ -325,8 +418,8 @@ def test_debug_pybehave(self) -> None: assert " pep626:" in out # Some things that shouldn't appear.. - assert "typing." not in out # import from typing - assert ": <" not in out # objects without a good repr + assert "typing." not in out # import from typing + assert ": <" not in out # objects without a good repr # It should report PYVERSION correctly. pyversion = re_line(r" PYVERSION:", out) @@ -349,23 +442,29 @@ def test_debug_premain(self) -> None: s = re.escape(os.sep) assert lines[0].startswith("-- premain ----") assert len(lines) > 25 - assert re.search(fr"{s}site-packages{s}_pytest{s}", out) - assert re.search(fr"{s}site-packages{s}pluggy{s}", out) - assert re.search(fr"(?m)^\s+test_debug_premain : .*{s}tests{s}test_cmdline.py:\d+$", out) - assert re.search(fr"(?m)^\s+command_line : .*{s}coverage{s}cmdline.py:\d+$", out) - assert re.search(fr"(?m)^\s+do_debug : .*{s}coverage{s}cmdline.py:\d+$", out) + assert re.search(rf"{s}site-packages{s}_pytest{s}", out) + assert re.search(rf"{s}site-packages{s}pluggy{s}", out) + assert re.search(rf"(?m)^\s+test_debug_premain : .*{s}tests{s}test_cmdline.py:\d+$", out) + assert re.search(rf"(?m)^\s+command_line : .*{s}coverage{s}cmdline.py:\d+$", out) + assert re.search(rf"(?m)^\s+do_debug : .*{s}coverage{s}cmdline.py:\d+$", out) assert "do_debug : " in lines[-1] def test_erase(self) -> None: # coverage erase - self.cmd_executes("erase", """\ + self.cmd_executes( + "erase", + """\ cov = Coverage() cov.erase() - """) - self.cmd_executes("erase --data-file=foo.cov", """\ + """, + ) + self.cmd_executes( + "erase --data-file=foo.cov", + """\ cov = Coverage(data_file="foo.cov") cov.erase() - """) + """, + ) def test_version(self) -> None: # coverage --version @@ -385,256 +484,399 @@ def test_cmd_help(self) -> None: def test_html(self) -> None: # coverage html -d DIR [-i] [--omit DIR,...] [FILE1 FILE2 ...] 
- self.cmd_executes("html", """\ + self.cmd_executes( + "html", + """\ cov = Coverage() cov.load() cov.html_report() - """) - self.cmd_executes("html -d dir1", """\ + """, + ) + self.cmd_executes( + "html -d dir1", + """\ cov = Coverage() cov.load() cov.html_report(directory="dir1") - """) - self.cmd_executes("html -i", """\ + """, + ) + self.cmd_executes( + "html -i", + """\ cov = Coverage() cov.load() cov.html_report(ignore_errors=True) - """) - self.cmd_executes("html --omit fooey", """\ + """, + ) + self.cmd_executes( + "html --omit fooey", + """\ cov = Coverage(omit=["fooey"]) cov.load() cov.html_report(omit=["fooey"]) - """) - self.cmd_executes("html --omit fooey,booey", """\ + """, + ) + self.cmd_executes( + "html --omit fooey,booey", + """\ cov = Coverage(omit=["fooey", "booey"]) cov.load() cov.html_report(omit=["fooey", "booey"]) - """) - self.cmd_executes("html mod1", """\ + """, + ) + self.cmd_executes( + "html mod1", + """\ cov = Coverage() cov.load() cov.html_report(morfs=["mod1"]) - """) - self.cmd_executes("html mod1 mod2 mod3", """\ + """, + ) + self.cmd_executes( + "html mod1 mod2 mod3", + """\ cov = Coverage() cov.load() cov.html_report(morfs=["mod1", "mod2", "mod3"]) - """) - self.cmd_executes("html --precision=3", """\ + """, + ) + self.cmd_executes( + "html --precision=3", + """\ cov = Coverage() cov.load() cov.html_report(precision=3) - """) - self.cmd_executes("html --title=Hello_there", """\ + """, + ) + self.cmd_executes( + "html --title=Hello_there", + """\ cov = Coverage() cov.load() cov.html_report(title='Hello_there') - """) - self.cmd_executes("html -q", """\ + """, + ) + self.cmd_executes( + "html -q", + """\ cov = Coverage(messages=False) cov.load() cov.html_report() - """) - self.cmd_executes("html --quiet", """\ + """, + ) + self.cmd_executes( + "html --quiet", + """\ cov = Coverage(messages=False) cov.load() cov.html_report() - """) + """, + ) def test_json(self) -> None: # coverage json [-i] [--omit DIR,...] [FILE1 FILE2 ...] 
- self.cmd_executes("json", """\ + self.cmd_executes( + "json", + """\ cov = Coverage() cov.load() cov.json_report() - """) - self.cmd_executes("json --pretty-print", """\ + """, + ) + self.cmd_executes( + "json --pretty-print", + """\ cov = Coverage() cov.load() cov.json_report(pretty_print=True) - """) - self.cmd_executes("json --pretty-print --show-contexts", """\ + """, + ) + self.cmd_executes( + "json --pretty-print --show-contexts", + """\ cov = Coverage() cov.load() cov.json_report(pretty_print=True, show_contexts=True) - """) - self.cmd_executes("json -i", """\ + """, + ) + self.cmd_executes( + "json -i", + """\ cov = Coverage() cov.load() cov.json_report(ignore_errors=True) - """) - self.cmd_executes("json -o myjson.foo", """\ + """, + ) + self.cmd_executes( + "json -o myjson.foo", + """\ cov = Coverage() cov.load() cov.json_report(outfile="myjson.foo") - """) - self.cmd_executes("json -o -", """\ + """, + ) + self.cmd_executes( + "json -o -", + """\ cov = Coverage() cov.load() cov.json_report(outfile="-") - """) - self.cmd_executes("json --omit fooey", """\ + """, + ) + self.cmd_executes( + "json --omit fooey", + """\ cov = Coverage(omit=["fooey"]) cov.load() cov.json_report(omit=["fooey"]) - """) - self.cmd_executes("json --omit fooey,booey", """\ + """, + ) + self.cmd_executes( + "json --omit fooey,booey", + """\ cov = Coverage(omit=["fooey", "booey"]) cov.load() cov.json_report(omit=["fooey", "booey"]) - """) - self.cmd_executes("json mod1", """\ + """, + ) + self.cmd_executes( + "json mod1", + """\ cov = Coverage() cov.load() cov.json_report(morfs=["mod1"]) - """) - self.cmd_executes("json mod1 mod2 mod3", """\ + """, + ) + self.cmd_executes( + "json mod1 mod2 mod3", + """\ cov = Coverage() cov.load() cov.json_report(morfs=["mod1", "mod2", "mod3"]) - """) - self.cmd_executes("json -q", """\ + """, + ) + self.cmd_executes( + "json -q", + """\ cov = Coverage(messages=False) cov.load() cov.json_report() - """) - self.cmd_executes("json --quiet", """\ + """, + ) + self.cmd_executes( + "json --quiet", + """\ cov = Coverage(messages=False) cov.load() cov.json_report() - """) + """, + ) def test_lcov(self) -> None: # coverage lcov [-i] [--omit DIR,...] [FILE1 FILE2 ...] 
- self.cmd_executes("lcov", """\ + self.cmd_executes( + "lcov", + """\ cov = Coverage() cov.load() cov.lcov_report() - """) - self.cmd_executes("lcov -i", """\ + """, + ) + self.cmd_executes( + "lcov -i", + """\ cov = Coverage() cov.load() cov.lcov_report(ignore_errors=True) - """) - self.cmd_executes("lcov -o mylcov.foo", """\ + """, + ) + self.cmd_executes( + "lcov -o mylcov.foo", + """\ cov = Coverage() cov.load() cov.lcov_report(outfile="mylcov.foo") - """) - self.cmd_executes("lcov -o -", """\ + """, + ) + self.cmd_executes( + "lcov -o -", + """\ cov = Coverage() cov.load() cov.lcov_report(outfile="-") - """) - self.cmd_executes("lcov --omit fooey", """\ + """, + ) + self.cmd_executes( + "lcov --omit fooey", + """\ cov = Coverage(omit=["fooey"]) cov.load() cov.lcov_report(omit=["fooey"]) - """) - self.cmd_executes("lcov --omit fooey,booey", """\ + """, + ) + self.cmd_executes( + "lcov --omit fooey,booey", + """\ cov = Coverage(omit=["fooey", "booey"]) cov.load() cov.lcov_report(omit=["fooey", "booey"]) - """) - self.cmd_executes("lcov -q", """\ + """, + ) + self.cmd_executes( + "lcov -q", + """\ cov = Coverage(messages=False) cov.load() cov.lcov_report() - """) - self.cmd_executes("lcov --quiet", """\ + """, + ) + self.cmd_executes( + "lcov --quiet", + """\ cov = Coverage(messages=False) cov.load() cov.lcov_report() - """) + """, + ) def test_report(self) -> None: # coverage report [-m] [-i] [-o DIR,...] [FILE1 FILE2 ...] - self.cmd_executes("report", """\ + self.cmd_executes( + "report", + """\ cov = Coverage() cov.load() cov.report(show_missing=None) - """) - self.cmd_executes("report -i", """\ + """, + ) + self.cmd_executes( + "report -i", + """\ cov = Coverage() cov.load() cov.report(ignore_errors=True) - """) - self.cmd_executes("report -m", """\ + """, + ) + self.cmd_executes( + "report -m", + """\ cov = Coverage() cov.load() cov.report(show_missing=True) - """) - self.cmd_executes("report --omit fooey", """\ + """, + ) + self.cmd_executes( + "report --omit fooey", + """\ cov = Coverage(omit=["fooey"]) cov.load() cov.report(omit=["fooey"]) - """) - self.cmd_executes("report --omit fooey,booey", """\ + """, + ) + self.cmd_executes( + "report --omit fooey,booey", + """\ cov = Coverage(omit=["fooey", "booey"]) cov.load() cov.report(omit=["fooey", "booey"]) - """) - self.cmd_executes("report mod1", """\ + """, + ) + self.cmd_executes( + "report mod1", + """\ cov = Coverage() cov.load() cov.report(morfs=["mod1"]) - """) - self.cmd_executes("report mod1 mod2 mod3", """\ + """, + ) + self.cmd_executes( + "report mod1 mod2 mod3", + """\ cov = Coverage() cov.load() cov.report(morfs=["mod1", "mod2", "mod3"]) - """) - self.cmd_executes("report --precision=7", """\ + """, + ) + self.cmd_executes( + "report --precision=7", + """\ cov = Coverage() cov.load() cov.report(precision=7) - """) - self.cmd_executes("report --skip-covered", """\ + """, + ) + self.cmd_executes( + "report --skip-covered", + """\ cov = Coverage() cov.load() cov.report(skip_covered=True) - """) - self.cmd_executes("report --skip-covered --no-skip-covered", """\ + """, + ) + self.cmd_executes( + "report --skip-covered --no-skip-covered", + """\ cov = Coverage() cov.load() cov.report(skip_covered=False) - """) - self.cmd_executes("report --no-skip-covered", """\ + """, + ) + self.cmd_executes( + "report --no-skip-covered", + """\ cov = Coverage() cov.load() cov.report(skip_covered=False) - """) - self.cmd_executes("report --skip-empty", """\ + """, + ) + self.cmd_executes( + "report --skip-empty", + """\ cov = Coverage() 
cov.load() cov.report(skip_empty=True) - """) - self.cmd_executes("report --contexts=foo,bar", """\ + """, + ) + self.cmd_executes( + "report --contexts=foo,bar", + """\ cov = Coverage() cov.load() cov.report(contexts=["foo", "bar"]) - """) - self.cmd_executes("report --sort=-foo", """\ + """, + ) + self.cmd_executes( + "report --sort=-foo", + """\ cov = Coverage() cov.load() cov.report(sort='-foo') - """) - self.cmd_executes("report --data-file=foo.cov.2", """\ + """, + ) + self.cmd_executes( + "report --data-file=foo.cov.2", + """\ cov = Coverage(data_file="foo.cov.2") cov.load() cov.report(show_missing=None) - """) - self.cmd_executes("report --format=markdown", """\ + """, + ) + self.cmd_executes( + "report --format=markdown", + """\ cov = Coverage() cov.load() cov.report(output_format="markdown") - """) + """, + ) def test_run(self) -> None: # coverage run [-p] [-L] [--timid] MODULE.py [ARG1 ARG2 ...] # run calls coverage.erase first. - self.cmd_executes("run foo.py", """\ + self.cmd_executes( + "run foo.py", + """\ cov = Coverage() runner = PyRunner(['foo.py'], as_module=False) runner.prepare() @@ -642,9 +884,12 @@ def test_run(self) -> None: runner.run() cov.stop() cov.save() - """) + """, + ) # run -a combines with an existing data file before saving. - self.cmd_executes("run -a foo.py", """\ + self.cmd_executes( + "run -a foo.py", + """\ cov = Coverage() runner = PyRunner(['foo.py'], as_module=False) runner.prepare() @@ -653,9 +898,12 @@ def test_run(self) -> None: runner.run() cov.stop() cov.save() - """) + """, + ) # --timid sets a flag, and program arguments get passed through. - self.cmd_executes("run --timid foo.py abc 123", """\ + self.cmd_executes( + "run --timid foo.py abc 123", + """\ cov = Coverage(timid=True) runner = PyRunner(['foo.py', 'abc', '123'], as_module=False) runner.prepare() @@ -663,9 +911,12 @@ def test_run(self) -> None: runner.run() cov.stop() cov.save() - """) + """, + ) # -L sets a flag, and flags for the program don't confuse us. 
- self.cmd_executes("run -p -L foo.py -a -b", """\ + self.cmd_executes( + "run -p -L foo.py -a -b", + """\ cov = Coverage(cover_pylib=True, data_suffix=True) runner = PyRunner(['foo.py', '-a', '-b'], as_module=False) runner.prepare() @@ -673,8 +924,11 @@ def test_run(self) -> None: runner.run() cov.stop() cov.save() - """) - self.cmd_executes("run --branch foo.py", """\ + """, + ) + self.cmd_executes( + "run --branch foo.py", + """\ cov = Coverage(branch=True) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() @@ -682,8 +936,11 @@ def test_run(self) -> None: runner.run() cov.stop() cov.save() - """) - self.cmd_executes("run --rcfile=myrc.rc foo.py", """\ + """, + ) + self.cmd_executes( + "run --rcfile=myrc.rc foo.py", + """\ cov = Coverage(config_file="myrc.rc") runner = PyRunner(['foo.py'], as_module=False) runner.prepare() @@ -691,8 +948,11 @@ def test_run(self) -> None: runner.run() cov.stop() cov.save() - """) - self.cmd_executes("run --include=pre1,pre2 foo.py", """\ + """, + ) + self.cmd_executes( + "run --include=pre1,pre2 foo.py", + """\ cov = Coverage(include=["pre1", "pre2"]) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() @@ -700,8 +960,11 @@ def test_run(self) -> None: runner.run() cov.stop() cov.save() - """) - self.cmd_executes("run --omit=opre1,opre2 foo.py", """\ + """, + ) + self.cmd_executes( + "run --omit=opre1,opre2 foo.py", + """\ cov = Coverage(omit=["opre1", "opre2"]) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() @@ -709,8 +972,11 @@ def test_run(self) -> None: runner.run() cov.stop() cov.save() - """) - self.cmd_executes("run --include=pre1,pre2 --omit=opre1,opre2 foo.py", """\ + """, + ) + self.cmd_executes( + "run --include=pre1,pre2 --omit=opre1,opre2 foo.py", + """\ cov = Coverage(include=["pre1", "pre2"], omit=["opre1", "opre2"]) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() @@ -718,8 +984,11 @@ def test_run(self) -> None: runner.run() cov.stop() cov.save() - """) - self.cmd_executes("run --source=quux,hi.there,/home/bar foo.py", """\ + """, + ) + self.cmd_executes( + "run --source=quux,hi.there,/home/bar foo.py", + """\ cov = Coverage(source=["quux", "hi.there", "/home/bar"]) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() @@ -727,8 +996,11 @@ def test_run(self) -> None: runner.run() cov.stop() cov.save() - """) - self.cmd_executes("run --concurrency=gevent foo.py", """\ + """, + ) + self.cmd_executes( + "run --concurrency=gevent foo.py", + """\ cov = Coverage(concurrency=['gevent']) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() @@ -736,8 +1008,11 @@ def test_run(self) -> None: runner.run() cov.stop() cov.save() - """) - self.cmd_executes("run --concurrency=multiprocessing foo.py", """\ + """, + ) + self.cmd_executes( + "run --concurrency=multiprocessing foo.py", + """\ cov = Coverage(concurrency=['multiprocessing']) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() @@ -745,8 +1020,11 @@ def test_run(self) -> None: runner.run() cov.stop() cov.save() - """) - self.cmd_executes("run --concurrency=gevent,thread foo.py", """\ + """, + ) + self.cmd_executes( + "run --concurrency=gevent,thread foo.py", + """\ cov = Coverage(concurrency=['gevent', 'thread']) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() @@ -754,8 +1032,11 @@ def test_run(self) -> None: runner.run() cov.stop() cov.save() - """) - self.cmd_executes("run --data-file=output.coverage foo.py", """\ + """, + ) + self.cmd_executes( + "run --data-file=output.coverage foo.py", + 
"""\ cov = Coverage(data_file="output.coverage") runner = PyRunner(['foo.py'], as_module=False) runner.prepare() @@ -763,7 +1044,8 @@ def test_run(self) -> None: runner.run() cov.stop() cov.save() - """) + """, + ) def test_multiprocessing_needs_config_file(self) -> None: # You can't use command-line args to add options to multiprocessing @@ -776,7 +1058,9 @@ def test_multiprocessing_needs_config_file(self) -> None: assert "Remove --branch from the command line." in err def test_run_debug(self) -> None: - self.cmd_executes("run --debug=opt1 foo.py", """\ + self.cmd_executes( + "run --debug=opt1 foo.py", + """\ cov = Coverage(debug=["opt1"]) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() @@ -784,8 +1068,11 @@ def test_run_debug(self) -> None: runner.run() cov.stop() cov.save() - """) - self.cmd_executes("run --debug=opt1,opt2 foo.py", """\ + """, + ) + self.cmd_executes( + "run --debug=opt1,opt2 foo.py", + """\ cov = Coverage(debug=["opt1","opt2"]) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() @@ -793,10 +1080,13 @@ def test_run_debug(self) -> None: runner.run() cov.stop() cov.save() - """) + """, + ) def test_run_module(self) -> None: - self.cmd_executes("run -m mymodule", """\ + self.cmd_executes( + "run -m mymodule", + """\ cov = Coverage() runner = PyRunner(['mymodule'], as_module=True) runner.prepare() @@ -804,8 +1094,11 @@ def test_run_module(self) -> None: runner.run() cov.stop() cov.save() - """) - self.cmd_executes("run -m mymodule -qq arg1 arg2", """\ + """, + ) + self.cmd_executes( + "run -m mymodule -qq arg1 arg2", + """\ cov = Coverage() runner = PyRunner(['mymodule', '-qq', 'arg1', 'arg2'], as_module=True) runner.prepare() @@ -813,8 +1106,11 @@ def test_run_module(self) -> None: runner.run() cov.stop() cov.save() - """) - self.cmd_executes("run --branch -m mymodule", """\ + """, + ) + self.cmd_executes( + "run --branch -m mymodule", + """\ cov = Coverage(branch=True) runner = PyRunner(['mymodule'], as_module=True) runner.prepare() @@ -822,7 +1118,8 @@ def test_run_module(self) -> None: runner.run() cov.stop() cov.save() - """) + """, + ) self.cmd_executes_same("run -m mymodule", "run --module mymodule") def test_run_nothing(self) -> None: @@ -831,7 +1128,9 @@ def test_run_nothing(self) -> None: def test_run_from_config(self) -> None: options = {"run:command_line": "myprog.py a 123 'a quoted thing' xyz"} - self.cmd_executes("run", """\ + self.cmd_executes( + "run", + """\ cov = Coverage() runner = PyRunner(['myprog.py', 'a', '123', 'a quoted thing', 'xyz'], as_module=False) runner.prepare() @@ -844,7 +1143,9 @@ def test_run_from_config(self) -> None: ) def test_run_module_from_config(self) -> None: - self.cmd_executes("run", """\ + self.cmd_executes( + "run", + """\ cov = Coverage() runner = PyRunner(['mymodule', 'thing1', 'thing2'], as_module=True) runner.prepare() @@ -857,7 +1158,9 @@ def test_run_module_from_config(self) -> None: ) def test_run_from_config_but_empty(self) -> None: - self.cmd_executes("run", """\ + self.cmd_executes( + "run", + """\ cov = Coverage() show_help('Nothing to do.') """, @@ -866,13 +1169,17 @@ def test_run_from_config_but_empty(self) -> None: ) def test_run_dashm_only(self) -> None: - self.cmd_executes("run -m", """\ + self.cmd_executes( + "run -m", + """\ cov = Coverage() show_help('No module specified for -m') """, ret=ERR, ) - self.cmd_executes("run -m", """\ + self.cmd_executes( + "run -m", + """\ cov = Coverage() show_help('No module specified for -m') """, @@ -886,56 +1193,86 @@ def 
test_cant_append_parallel(self) -> None: def test_xml(self) -> None: # coverage xml [-i] [--omit DIR,...] [FILE1 FILE2 ...] - self.cmd_executes("xml", """\ + self.cmd_executes( + "xml", + """\ cov = Coverage() cov.load() cov.xml_report() - """) - self.cmd_executes("xml -i", """\ + """, + ) + self.cmd_executes( + "xml -i", + """\ cov = Coverage() cov.load() cov.xml_report(ignore_errors=True) - """) - self.cmd_executes("xml -o myxml.foo", """\ + """, + ) + self.cmd_executes( + "xml -o myxml.foo", + """\ cov = Coverage() cov.load() cov.xml_report(outfile="myxml.foo") - """) - self.cmd_executes("xml -o -", """\ + """, + ) + self.cmd_executes( + "xml -o -", + """\ cov = Coverage() cov.load() cov.xml_report(outfile="-") - """) - self.cmd_executes("xml --omit fooey", """\ + """, + ) + self.cmd_executes( + "xml --omit fooey", + """\ cov = Coverage(omit=["fooey"]) cov.load() cov.xml_report(omit=["fooey"]) - """) - self.cmd_executes("xml --omit fooey,booey", """\ + """, + ) + self.cmd_executes( + "xml --omit fooey,booey", + """\ cov = Coverage(omit=["fooey", "booey"]) cov.load() cov.xml_report(omit=["fooey", "booey"]) - """) - self.cmd_executes("xml mod1", """\ + """, + ) + self.cmd_executes( + "xml mod1", + """\ cov = Coverage() cov.load() cov.xml_report(morfs=["mod1"]) - """) - self.cmd_executes("xml mod1 mod2 mod3", """\ + """, + ) + self.cmd_executes( + "xml mod1 mod2 mod3", + """\ cov = Coverage() cov.load() cov.xml_report(morfs=["mod1", "mod2", "mod3"]) - """) - self.cmd_executes("xml -q", """\ + """, + ) + self.cmd_executes( + "xml -q", + """\ cov = Coverage(messages=False) cov.load() cov.xml_report() - """) - self.cmd_executes("xml --quiet", """\ + """, + ) + self.cmd_executes( + "xml --quiet", + """\ cov = Coverage(messages=False) cov.load() cov.xml_report() - """) + """, + ) def test_no_arguments_at_all(self) -> None: self.cmd_help("", topic="minimum_help", ret=OK) @@ -1034,7 +1371,7 @@ def test_help_contains_command_name(self) -> None: fake_command_path = os_sep("lorem/ipsum/dolor") expected_command_name = "dolor" fake_argv = [fake_command_path, "sit", "amet"] - with mock.patch.object(sys, 'argv', new=fake_argv): + with mock.patch.object(sys, "argv", new=fake_argv): self.command_line("help") out = self.stdout() assert expected_command_name in out @@ -1049,7 +1386,7 @@ def test_help_contains_command_name_from_package(self) -> None: fake_command_path = os_sep("lorem/ipsum/dolor/__main__.py") expected_command_name = "dolor" fake_argv = [fake_command_path, "sit", "amet"] - with mock.patch.object(sys, 'argv', new=fake_argv): + with mock.patch.object(sys, "argv", new=fake_argv): self.command_line("help") out = self.stdout() assert expected_command_name in out @@ -1102,16 +1439,16 @@ class CoverageScriptStub: def command_line(self, argv: list[str]) -> int: """Stub for command_line, the arg determines what it will do.""" - if argv[0] == 'hello': + if argv[0] == "hello": print("Hello, world!") - elif argv[0] == 'raise': + elif argv[0] == "raise": try: raise RuntimeError("oh noes!") except: raise _ExceptionDuringRun(*sys.exc_info()) from None - elif argv[0] == 'internalraise': + elif argv[0] == "internalraise": raise ValueError("coverage is broken") - elif argv[0] == 'exit': + elif argv[0] == "exit": sys.exit(23) else: raise AssertionError(f"Bad CoverageScriptStub: {argv!r}") @@ -1120,36 +1457,37 @@ def command_line(self, argv: list[str]) -> int: def setUp(self) -> None: super().setUp() old_CoverageScript = coverage.cmdline.CoverageScript - coverage.cmdline.CoverageScript = self.CoverageScriptStub 
# type: ignore - self.addCleanup(setattr, coverage.cmdline, 'CoverageScript', old_CoverageScript) + coverage.cmdline.CoverageScript = self.CoverageScriptStub # type: ignore + self.addCleanup(setattr, coverage.cmdline, "CoverageScript", old_CoverageScript) def test_normal(self) -> None: - ret = coverage.cmdline.main(['hello']) + ret = coverage.cmdline.main(["hello"]) assert ret == 0 assert self.stdout() == "Hello, world!\n" def test_raise(self) -> None: - ret = coverage.cmdline.main(['raise']) + ret = coverage.cmdline.main(["raise"]) assert ret == 1 out, err = self.stdouterr() assert out == "" print(err) err_parts = err.splitlines(keepends=True) - assert err_parts[0] == 'Traceback (most recent call last):\n' + assert err_parts[0] == "Traceback (most recent call last):\n" assert ' raise RuntimeError("oh noes!")\n' in err_parts - assert err_parts[-1] == 'RuntimeError: oh noes!\n' + assert err_parts[-1] == "RuntimeError: oh noes!\n" def test_internalraise(self) -> None: with pytest.raises(ValueError, match="coverage is broken"): - coverage.cmdline.main(['internalraise']) + coverage.cmdline.main(["internalraise"]) def test_exit(self) -> None: - ret = coverage.cmdline.main(['exit']) + ret = coverage.cmdline.main(["exit"]) assert ret == 23 class CoverageReportingFake: """A fake Coverage.coverage test double for FailUnderTest methods.""" + # pylint: disable=missing-function-docstring def __init__( self, @@ -1194,32 +1532,35 @@ def lcov_report(self, *args_unused: Any, **kwargs_unused: Any) -> float: class FailUnderTest(CoverageTest): """Tests of the --fail-under handling in cmdline.py.""" - @pytest.mark.parametrize("results, fail_under, cmd, ret", [ - # Command-line switch properly checks the result of reporting functions. - ((20, 30, 40, 50, 60), None, "report --fail-under=19", 0), - ((20, 30, 40, 50, 60), None, "report --fail-under=21", 2), - ((20, 30, 40, 50, 60), None, "html --fail-under=29", 0), - ((20, 30, 40, 50, 60), None, "html --fail-under=31", 2), - ((20, 30, 40, 50, 60), None, "xml --fail-under=39", 0), - ((20, 30, 40, 50, 60), None, "xml --fail-under=41", 2), - ((20, 30, 40, 50, 60), None, "json --fail-under=49", 0), - ((20, 30, 40, 50, 60), None, "json --fail-under=51", 2), - ((20, 30, 40, 50, 60), None, "lcov --fail-under=59", 0), - ((20, 30, 40, 50, 60), None, "lcov --fail-under=61", 2), - # Configuration file setting properly checks the result of reporting. - ((20, 30, 40, 50, 60), 19, "report", 0), - ((20, 30, 40, 50, 60), 21, "report", 2), - ((20, 30, 40, 50, 60), 29, "html", 0), - ((20, 30, 40, 50, 60), 31, "html", 2), - ((20, 30, 40, 50, 60), 39, "xml", 0), - ((20, 30, 40, 50, 60), 41, "xml", 2), - ((20, 30, 40, 50, 60), 49, "json", 0), - ((20, 30, 40, 50, 60), 51, "json", 2), - ((20, 30, 40, 50, 60), 59, "lcov", 0), - ((20, 30, 40, 50, 60), 61, "lcov", 2), - # Command-line overrides configuration. - ((20, 30, 40, 50, 60), 19, "report --fail-under=21", 2), - ]) + @pytest.mark.parametrize( + "results, fail_under, cmd, ret", + [ + # Command-line switch properly checks the result of reporting functions. 
+ ((20, 30, 40, 50, 60), None, "report --fail-under=19", 0), + ((20, 30, 40, 50, 60), None, "report --fail-under=21", 2), + ((20, 30, 40, 50, 60), None, "html --fail-under=29", 0), + ((20, 30, 40, 50, 60), None, "html --fail-under=31", 2), + ((20, 30, 40, 50, 60), None, "xml --fail-under=39", 0), + ((20, 30, 40, 50, 60), None, "xml --fail-under=41", 2), + ((20, 30, 40, 50, 60), None, "json --fail-under=49", 0), + ((20, 30, 40, 50, 60), None, "json --fail-under=51", 2), + ((20, 30, 40, 50, 60), None, "lcov --fail-under=59", 0), + ((20, 30, 40, 50, 60), None, "lcov --fail-under=61", 2), + # Configuration file setting properly checks the result of reporting. + ((20, 30, 40, 50, 60), 19, "report", 0), + ((20, 30, 40, 50, 60), 21, "report", 2), + ((20, 30, 40, 50, 60), 29, "html", 0), + ((20, 30, 40, 50, 60), 31, "html", 2), + ((20, 30, 40, 50, 60), 39, "xml", 0), + ((20, 30, 40, 50, 60), 41, "xml", 2), + ((20, 30, 40, 50, 60), 49, "json", 0), + ((20, 30, 40, 50, 60), 51, "json", 2), + ((20, 30, 40, 50, 60), 59, "lcov", 0), + ((20, 30, 40, 50, 60), 61, "lcov", 2), + # Command-line overrides configuration. + ((20, 30, 40, 50, 60), 19, "report --fail-under=21", 2), + ], + ) def test_fail_under( self, results: tuple[float, float, float, float, float], @@ -1230,19 +1571,30 @@ def test_fail_under( cov = CoverageReportingFake(*results) if fail_under is not None: cov.set_option("report:fail_under", fail_under) - with mock.patch("coverage.cmdline.Coverage", lambda *a,**kw: cov): + with mock.patch("coverage.cmdline.Coverage", lambda *a, **kw: cov): self.command_line(cmd, ret) - @pytest.mark.parametrize("result, cmd, ret, msg", [ - (20.5, "report --fail-under=20.4 --precision=1", 0, ""), - (20.5, "report --fail-under=20.6 --precision=1", 2, - "Coverage failure: total of 20.5 is less than fail-under=20.6\n"), - (20.12345, "report --fail-under=20.1235 --precision=5", 2, - "Coverage failure: total of 20.12345 is less than fail-under=20.12350\n"), - (20.12339, "report --fail-under=20.1234 --precision=4", 0, ""), - ]) + @pytest.mark.parametrize( + "result, cmd, ret, msg", + [ + (20.5, "report --fail-under=20.4 --precision=1", 0, ""), + ( + 20.5, + "report --fail-under=20.6 --precision=1", + 2, + "Coverage failure: total of 20.5 is less than fail-under=20.6\n", + ), + ( + 20.12345, + "report --fail-under=20.1235 --precision=5", + 2, + "Coverage failure: total of 20.12345 is less than fail-under=20.12350\n", + ), + (20.12339, "report --fail-under=20.1234 --precision=4", 0, ""), + ], + ) def test_fail_under_with_precision(self, result: float, cmd: str, ret: int, msg: str) -> None: cov = CoverageReportingFake(report_result=result) - with mock.patch("coverage.cmdline.Coverage", lambda *a,**kw: cov): + with mock.patch("coverage.cmdline.Coverage", lambda *a, **kw: cov): self.command_line(cmd, ret) assert self.stdout() == msg diff --git a/tests/test_collector.py b/tests/test_collector.py index d33cb2639..5b4f7bd93 100644 --- a/tests/test_collector.py +++ b/tests/test_collector.py @@ -20,12 +20,17 @@ def test_should_trace_cache(self) -> None: # The tracers should only invoke should_trace once for each file name. # Make some files that invoke each other. - self.make_file("f1.py", """\ + self.make_file( + "f1.py", + """\ def f1(x, f): return f(x) - """) + """, + ) - self.make_file("f2.py", """\ + self.make_file( + "f2.py", + """\ import f1 def func(x): @@ -36,12 +41,13 @@ def otherfunc(x): for i in range(10): func(i) - """) + """, + ) # Trace one file, but not the other. 
CheckUniqueFilenames will assert # that _should_trace hasn't been called twice for the same file. cov = coverage.Coverage(include=["f1.py"]) - should_trace_hook = CheckUniqueFilenames.hook(cov, '_should_trace') + should_trace_hook = CheckUniqueFilenames.hook(cov, "_should_trace") # Import the Python file, executing it. self.start_import_stop(cov, "f2") diff --git a/tests/test_concurrency.py b/tests/test_concurrency.py index 9e32ea87d..9bb26d283 100644 --- a/tests/test_concurrency.py +++ b/tests/test_concurrency.py @@ -57,9 +57,9 @@ def measurable_line(l: str) -> bool: l = l.strip() if not l: return False - if l.startswith('#'): + if l.startswith("#"): return False - if l.startswith('else:'): + if l.startswith("else:"): return False return True @@ -248,7 +248,7 @@ def try_some_code( print_simple_annotation(code, linenos) lines = line_count(code) - assert line_counts(data)['try_it.py'] == lines + assert line_counts(data)["try_it.py"] == lines def test_threads(self) -> None: code = (THREAD + SUM_RANGE_Q + PRINT_SUM_RANGE).format(QLIMIT=self.QLIMIT) @@ -319,7 +319,9 @@ def do(): # Sometimes a test fails due to inherent randomness. Try more times. @pytest.mark.flaky(max_runs=3) def test_threads_with_gevent(self) -> None: - self.make_file("both.py", """\ + self.make_file( + "both.py", + """\ import queue import threading @@ -340,12 +342,11 @@ def gwork(q): answer = q.get() assert answer == 1 - """) + """, + ) _, out = self.run_command_status("coverage run --concurrency=thread,gevent both.py") if gevent is None: - assert out == ( - "Couldn't trace with concurrency=gevent, the module isn't installed.\n" - ) + assert out == ("Couldn't trace with concurrency=gevent, the module isn't installed.\n") pytest.skip("Can't run test without gevent installed.") if not testenv.C_TRACER: assert out == ( @@ -388,7 +389,7 @@ class WithoutConcurrencyModuleTest(CoverageTest): @pytest.mark.parametrize("module", ["eventlet", "gevent", "greenlet"]) def test_missing_module(self, module: str) -> None: self.make_file("prog.py", "a = 1") - sys.modules[module] = None # type: ignore[assignment] + sys.modules[module] = None # type: ignore[assignment] msg = f"Couldn't trace with concurrency={module}, the module isn't installed." with pytest.raises(ConfigError, match=msg): self.command_line(f"run --concurrency={module} prog.py") @@ -452,7 +453,7 @@ def start_method_fixture(request: pytest.FixtureRequest) -> str: # Sometimes a test fails due to inherent randomness. Try more times. -#@pytest.mark.flaky(max_runs=30) +# @pytest.mark.flaky(max_runs=30) class MultiprocessingTest(CoverageTest): """Test support of the multiprocessing module.""" @@ -468,11 +469,14 @@ def try_multiprocessing_code( ) -> None: """Run code using multiprocessing, it should produce `expected_out`.""" self.make_file("multi.py", code) - self.make_file(".coveragerc", f"""\ + self.make_file( + ".coveragerc", + f"""\ [run] concurrency = {concurrency} source = . 
- """) + """, + ) cmd = f"coverage run {args} multi.py {start_method}" _, out = self.run_command_status(cmd) @@ -506,7 +510,7 @@ def test_multiprocessing_simple(self, start_method: str) -> None: nprocs = 3 upto = 30 code = (SQUARE_OR_CUBE_WORK + MULTI_CODE).format(NPROCS=nprocs, UPTO=upto) - total = sum(x*x if x%2 else x*x*x for x in range(upto)) + total = sum(x * x if x % 2 else x * x * x for x in range(upto)) expected_out = f"{nprocs} pids, {total = }" self.try_multiprocessing_code( code, @@ -520,7 +524,7 @@ def test_multiprocessing_append(self, start_method: str) -> None: nprocs = 3 upto = 30 code = (SQUARE_OR_CUBE_WORK + MULTI_CODE).format(NPROCS=nprocs, UPTO=upto) - total = sum(x*x if x%2 else x*x*x for x in range(upto)) + total = sum(x * x if x % 2 else x * x * x for x in range(upto)) expected_out = f"{nprocs} pids, total = {total}" self.try_multiprocessing_code( code, @@ -534,9 +538,9 @@ def test_multiprocessing_append(self, start_method: str) -> None: def test_multiprocessing_and_gevent(self, start_method: str) -> None: nprocs = 3 upto = 30 - code = ( - SUM_RANGE_WORK + EVENTLET + SUM_RANGE_Q + MULTI_CODE - ).format(NPROCS=nprocs, UPTO=upto) + code = (SUM_RANGE_WORK + EVENTLET + SUM_RANGE_Q + MULTI_CODE).format( + NPROCS=nprocs, UPTO=upto + ) total = sum(sum(range((x + 1) * 100)) for x in range(upto)) expected_out = f"{nprocs} pids, total = {total}" self.try_multiprocessing_code( @@ -552,26 +556,27 @@ def test_multiprocessing_with_branching(self, start_method: str) -> None: nprocs = 3 upto = 30 code = (SQUARE_OR_CUBE_WORK + MULTI_CODE).format(NPROCS=nprocs, UPTO=upto) - total = sum(x*x if x%2 else x*x*x for x in range(upto)) + total = sum(x * x if x % 2 else x * x * x for x in range(upto)) expected_out = f"{nprocs} pids, total = {total}" expect_warn = ( - env.PYBEHAVIOR.pep669 - and (not env.PYBEHAVIOR.branch_right_left) - and testenv.SYS_MON + env.PYBEHAVIOR.pep669 and (not env.PYBEHAVIOR.branch_right_left) and testenv.SYS_MON ) self.make_file("multi.py", code) - self.make_file("multi.rc", """\ + self.make_file( + "multi.rc", + """\ [run] concurrency = multiprocessing branch = True omit = */site-packages/* - """ + ("disable_warnings = no-sysmon" if expect_warn else "") - ) + """ + + ("disable_warnings = no-sysmon" if expect_warn else ""), + ) out = self.run_command(f"coverage run --rcfile=multi.rc multi.py {start_method}") assert out.rstrip() == expected_out - out = self.run_command("coverage combine -q") # sneak in a test of -q + out = self.run_command("coverage combine -q") # sneak in a test of -q assert out == "" out = self.run_command("coverage report -m") @@ -580,17 +585,23 @@ def test_multiprocessing_with_branching(self, start_method: str) -> None: def test_multiprocessing_bootstrap_error_handling(self) -> None: # An exception during bootstrapping will be reported. - self.make_file("multi.py", """\ + self.make_file( + "multi.py", + """\ import multiprocessing if __name__ == "__main__": with multiprocessing.Manager(): pass - """) - self.make_file(".coveragerc", """\ + """, + ) + self.make_file( + ".coveragerc", + """\ [run] concurrency = multiprocessing _crash = _bootstrap - """) + """, + ) out = self.run_command("coverage run multi.py", status=1) assert "Exception during multiprocessing bootstrap init" in out assert "RuntimeError: Crashing because called by _bootstrap" in out @@ -598,7 +609,9 @@ def test_multiprocessing_bootstrap_error_handling(self) -> None: def test_bug_890(self) -> None: # chdir in multiprocessing shouldn't keep us from finding the # .coveragerc file. 
- self.make_file("multi.py", """\ + self.make_file( + "multi.py", + """\ import multiprocessing, os, os.path if __name__ == "__main__": if not os.path.exists("./tmp"): os.mkdir("./tmp") @@ -606,11 +619,15 @@ def test_bug_890(self) -> None: with multiprocessing.Manager(): pass print("ok") - """) - self.make_file(".coveragerc", """\ + """, + ) + self.make_file( + ".coveragerc", + """\ [run] concurrency = multiprocessing - """) + """, + ) out = self.run_command("coverage run multi.py") assert out.splitlines()[-1] == "ok" @@ -620,7 +637,7 @@ def test_coverage_stop_in_threads() -> None: has_started_coverage = [] has_stopped_coverage = [] - def run_thread() -> None: # pragma: nested + def run_thread() -> None: # pragma: nested """Check that coverage is stopping properly in threads.""" deadline = time.time() + 5 ident = threading.current_thread().ident @@ -666,7 +683,7 @@ def test_thread_safe_save_data(tmp_path: pathlib.Path) -> None: for module_name in module_names: import_local_file(module_name) - def random_load() -> None: # pragma: nested + def random_load() -> None: # pragma: nested """Import modules randomly to stress coverage.""" while should_run[0]: module_name = random.choice(module_names) @@ -696,7 +713,7 @@ def random_load() -> None: # pragma: nested for t in threads: t.join() - if (not imported) and duration < 10: # pragma: only failure + if (not imported) and duration < 10: # pragma: only failure duration *= 2 finally: @@ -712,7 +729,9 @@ class SigtermTest(CoverageTest): @pytest.mark.parametrize("sigterm", [False, True]) def test_sigterm_multiprocessing_saves_data(self, sigterm: bool) -> None: # A terminated process should save its coverage data. - self.make_file("clobbered.py", """\ + self.make_file( + "clobbered.py", + """\ import multiprocessing import time @@ -733,13 +752,17 @@ def subproc(x): time.sleep(.05) proc.terminate() print("END", flush=True) - """) - self.make_file(".coveragerc", """\ + """, + ) + self.make_file( + ".coveragerc", + """\ [run] parallel = True concurrency = multiprocessing - """ + ("sigterm = true" if sigterm else ""), - ) + """ + + ("sigterm = true" if sigterm else ""), + ) out = self.run_command("coverage run clobbered.py") # Under Linux, things go wrong. Does that matter? if env.LINUX and "assert self._collectors" in out: @@ -756,20 +779,26 @@ def subproc(x): def test_sigterm_threading_saves_data(self) -> None: # A terminated process should save its coverage data. - self.make_file("handler.py", """\ + self.make_file( + "handler.py", + """\ import os, signal print("START", flush=True) print("SIGTERM", flush=True) os.kill(os.getpid(), signal.SIGTERM) print("NOT HERE", flush=True) - """) - self.make_file(".coveragerc", """\ + """, + ) + self.make_file( + ".coveragerc", + """\ [run] # The default concurrency option. concurrency = thread sigterm = true - """) + """, + ) status, out = self.run_command_status("coverage run handler.py") assert status != 0 out_lines = out.splitlines() @@ -783,7 +812,9 @@ def test_sigterm_threading_saves_data(self) -> None: def test_sigterm_still_runs(self) -> None: # A terminated process still runs its own SIGTERM handler. 
- self.make_file("handler.py", """\ + self.make_file( + "handler.py", + """\ import multiprocessing import signal import time @@ -808,12 +839,16 @@ def on_sigterm(signum, frame): while x.value != 0: time.sleep(.02) proc.terminate() - """) - self.make_file(".coveragerc", """\ + """, + ) + self.make_file( + ".coveragerc", + """\ [run] parallel = True concurrency = multiprocessing sigterm = True - """) + """, + ) out = self.run_command("coverage run handler.py") assert out == "START\nSIGTERM\nEND\n" diff --git a/tests/test_config.py b/tests/test_config.py index c4d011373..e74758ed3 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -41,12 +41,15 @@ def test_arguments(self) -> None: def test_config_file(self) -> None: # A .coveragerc file will be read into the configuration. - self.make_file(".coveragerc", """\ + self.make_file( + ".coveragerc", + """\ # This is just a bogus .rc file for testing. [run] timid = True data_file = .hello_kitty.data - """) + """, + ) cov = coverage.Coverage() assert cov.config.timid assert not cov.config.branch @@ -55,12 +58,15 @@ def test_config_file(self) -> None: @pytest.mark.parametrize("file_class", FilePathClasses) def test_named_config_file(self, file_class: FilePathType) -> None: # You can name the config file what you like. - self.make_file("my_cov.ini", """\ + self.make_file( + "my_cov.ini", + """\ [run] timid = True ; I wouldn't really use this as a data file... data_file = delete.me - """) + """, + ) cov = coverage.Coverage(config_file=file_class("my_cov.ini")) assert cov.config.timid assert not cov.config.branch @@ -68,7 +74,9 @@ def test_named_config_file(self, file_class: FilePathType) -> None: def test_toml_config_file(self) -> None: # A pyproject.toml file will be read into the configuration. - self.make_file("pyproject.toml", """\ + self.make_file( + "pyproject.toml", + """\ # This is just a bogus toml file for testing. [tool.somethingelse] authors = ["Joe D'Ávila "] @@ -84,7 +92,8 @@ def test_toml_config_file(self) -> None: title = "tabblo & ÂĢĪ„ÎąĐ‘ĐŦâ„“ĪƒÂģ" [tool.coverage.plugins.a_plugin] hello = "world" - """) + """, + ) cov = coverage.Coverage() assert cov.config.timid assert not cov.config.branch @@ -98,22 +107,28 @@ def test_toml_config_file(self) -> None: def test_toml_ints_can_be_floats(self) -> None: # Test that our class doesn't reject integers when loading floats - self.make_file("pyproject.toml", """\ + self.make_file( + "pyproject.toml", + """\ # This is just a bogus toml file for testing. [tool.coverage.report] fail_under = 90 - """) + """, + ) cov = coverage.Coverage() assert cov.config.fail_under == 90 assert isinstance(cov.config.fail_under, float) def test_ignored_config_file(self) -> None: # You can disable reading the .coveragerc file. - self.make_file(".coveragerc", """\ + self.make_file( + ".coveragerc", + """\ [run] timid = True data_file = delete.me - """) + """, + ) cov = coverage.Coverage(config_file=False) assert not cov.config.timid assert not cov.config.branch @@ -121,11 +136,14 @@ def test_ignored_config_file(self) -> None: def test_config_file_then_args(self) -> None: # The arguments override the .coveragerc file. 
- self.make_file(".coveragerc", """\ + self.make_file( + ".coveragerc", + """\ [run] timid = True data_file = weirdo.file - """) + """, + ) cov = coverage.Coverage(timid=False, data_file=".mycov") assert not cov.config.timid assert not cov.config.branch @@ -133,11 +151,14 @@ def test_config_file_then_args(self) -> None: def test_data_file_from_environment(self) -> None: # There's an environment variable for the data_file. - self.make_file(".coveragerc", """\ + self.make_file( + ".coveragerc", + """\ [run] timid = True data_file = weirdo.file - """) + """, + ) self.set_environ("COVERAGE_FILE", "fromenv.dat") cov = coverage.Coverage() assert cov.config.data_file == "fromenv.dat" @@ -146,19 +167,25 @@ def test_data_file_from_environment(self) -> None: assert cov.config.data_file == "fromarg.dat" def test_debug_from_environment(self) -> None: - self.make_file(".coveragerc", """\ + self.make_file( + ".coveragerc", + """\ [run] debug = dataio, pids - """) + """, + ) self.set_environ("COVERAGE_DEBUG", "callers, fooey") cov = coverage.Coverage() assert cov.config.debug == ["dataio", "pids", "callers", "fooey"] def test_rcfile_from_environment(self) -> None: - self.make_file("here.ini", """\ + self.make_file( + "here.ini", + """\ [run] data_file = overthere.dat - """) + """, + ) self.set_environ("COVERAGE_RCFILE", "here.ini") cov = coverage.Coverage() assert cov.config.data_file == "overthere.dat" @@ -171,14 +198,20 @@ def test_missing_rcfile_from_environment(self) -> None: @pytest.mark.parametrize("force", [False, True]) def test_force_environment(self, force: bool) -> None: - self.make_file(".coveragerc", """\ + self.make_file( + ".coveragerc", + """\ [run] debug = dataio, pids - """) - self.make_file("force.ini", """\ + """, + ) + self.make_file( + "force.ini", + """\ [run] debug = callers, fooey - """) + """, + ) if force: self.set_environ("COVERAGE_FORCE_CONFIG", "force.ini") cov = coverage.Coverage() @@ -187,56 +220,82 @@ def test_force_environment(self, force: bool) -> None: else: assert cov.config.debug == ["dataio", "pids"] - @pytest.mark.parametrize("bad_config, msg", [ - ("[run]\ntimid = maybe?\n", r"maybe[?]"), - ("timid = 1\n", r"no section headers"), - ("[run\n", r"\[run"), - ("[report]\nexclude_lines = foo(\n", - r"Invalid \[report\].exclude_lines value 'foo\(': " + - r"(unbalanced parenthesis|missing \))"), - ("[report]\nexclude_also = foo(\n", - r"Invalid \[report\].exclude_also value 'foo\(': " + - r"(unbalanced parenthesis|missing \))"), - ("[report]\npartial_branches = foo[\n", - r"Invalid \[report\].partial_branches value 'foo\[': " + - r"(unexpected end of regular expression|unterminated character set)"), - ("[report]\npartial_also = foo[\n", - r"Invalid \[report\].partial_also value 'foo\[': " + - r"(unexpected end of regular expression|unterminated character set)"), - ("[report]\npartial_branches_always = foo***\n", - r"Invalid \[report\].partial_branches_always value " + - r"'foo\*\*\*': " + - r"multiple repeat"), - ]) + @pytest.mark.parametrize( + "bad_config, msg", + [ + ("[run]\ntimid = maybe?\n", r"maybe[?]"), + ("timid = 1\n", r"no section headers"), + ("[run\n", r"\[run"), + ( + "[report]\nexclude_lines = foo(\n", + r"Invalid \[report\].exclude_lines value 'foo\(': " + + r"(unbalanced parenthesis|missing \))", + ), + ( + "[report]\nexclude_also = foo(\n", + r"Invalid \[report\].exclude_also value 'foo\(': " + + r"(unbalanced parenthesis|missing \))", + ), + ( + "[report]\npartial_branches = foo[\n", + r"Invalid \[report\].partial_branches value 'foo\[': " + + 
r"(unexpected end of regular expression|unterminated character set)", + ), + ( + "[report]\npartial_also = foo[\n", + r"Invalid \[report\].partial_also value 'foo\[': " + + r"(unexpected end of regular expression|unterminated character set)", + ), + ( + "[report]\npartial_branches_always = foo***\n", + r"Invalid \[report\].partial_branches_always value " + + r"'foo\*\*\*': " + + r"multiple repeat", + ), + ], + ) def test_parse_errors(self, bad_config: str, msg: str) -> None: # Im-parsable values raise ConfigError, with details. self.make_file(".coveragerc", bad_config) with pytest.raises(ConfigError, match=msg): coverage.Coverage() - @pytest.mark.parametrize("bad_config, msg", [ - ("[tool.coverage.run]\ntimid = \"maybe?\"\n", r"maybe[?]"), - ("[tool.coverage.run\n", None), - ('[tool.coverage.report]\nexclude_lines = ["foo("]\n', - r"Invalid \[tool.coverage.report\].exclude_lines value 'foo\(': " + - r"(unbalanced parenthesis|missing \))"), - ('[tool.coverage.report]\nexclude_also = ["foo("]\n', - r"Invalid \[tool.coverage.report\].exclude_also value 'foo\(': " + - r"(unbalanced parenthesis|missing \))"), - ('[tool.coverage.report]\npartial_branches = ["foo["]\n', - r"Invalid \[tool.coverage.report\].partial_branches value 'foo\[': " + - r"(unexpected end of regular expression|unterminated character set)"), - ('[tool.coverage.report]\npartial_also = ["foo["]\n', - r"Invalid \[tool.coverage.report\].partial_also value 'foo\[': " + - r"(unexpected end of regular expression|unterminated character set)"), - ('[tool.coverage.report]\npartial_branches_always = ["foo***"]\n', - r"Invalid \[tool.coverage.report\].partial_branches_always value " + - r"'foo\*\*\*': " + - r"multiple repeat"), - ('[tool.coverage.run]\nconcurrency="foo"', "not a list"), - ("[tool.coverage.report]\nprecision=1.23", "not an integer"), - ('[tool.coverage.report]\nfail_under="s"', "couldn't convert to a float"), - ]) + @pytest.mark.parametrize( + "bad_config, msg", + [ + ('[tool.coverage.run]\ntimid = "maybe?"\n', r"maybe[?]"), + ("[tool.coverage.run\n", None), + ( + '[tool.coverage.report]\nexclude_lines = ["foo("]\n', + r"Invalid \[tool.coverage.report\].exclude_lines value 'foo\(': " + + r"(unbalanced parenthesis|missing \))", + ), + ( + '[tool.coverage.report]\nexclude_also = ["foo("]\n', + r"Invalid \[tool.coverage.report\].exclude_also value 'foo\(': " + + r"(unbalanced parenthesis|missing \))", + ), + ( + '[tool.coverage.report]\npartial_branches = ["foo["]\n', + r"Invalid \[tool.coverage.report\].partial_branches value 'foo\[': " + + r"(unexpected end of regular expression|unterminated character set)", + ), + ( + '[tool.coverage.report]\npartial_also = ["foo["]\n', + r"Invalid \[tool.coverage.report\].partial_also value 'foo\[': " + + r"(unexpected end of regular expression|unterminated character set)", + ), + ( + '[tool.coverage.report]\npartial_branches_always = ["foo***"]\n', + r"Invalid \[tool.coverage.report\].partial_branches_always value " + + r"'foo\*\*\*': " + + r"multiple repeat", + ), + ('[tool.coverage.run]\nconcurrency="foo"', "not a list"), + ("[tool.coverage.report]\nprecision=1.23", "not an integer"), + ('[tool.coverage.report]\nfail_under="s"', "couldn't convert to a float"), + ], + ) def test_toml_parse_errors(self, bad_config: str, msg: str) -> None: # Im-parsable values raise ConfigError, with details. 
self.make_file("pyproject.toml", bad_config) @@ -245,7 +304,9 @@ def test_toml_parse_errors(self, bad_config: str, msg: str) -> None: def test_environment_vars_in_config(self) -> None: # Config files can have $envvars in them. - self.make_file(".coveragerc", """\ + self.make_file( + ".coveragerc", + """\ [run] data_file = $DATA_FILE.fooey branch = $OKAY @@ -256,7 +317,8 @@ def test_environment_vars_in_config(self) -> None: x${THING}y x${NOTHING}y huh$${X}what - """) + """, + ) self.set_environ("DATA_FILE", "hello-world") self.set_environ("THING", "ZZZ") self.set_environ("OKAY", "yes") @@ -267,7 +329,9 @@ def test_environment_vars_in_config(self) -> None: def test_environment_vars_in_toml_config(self) -> None: # Config files can have $envvars in them. - self.make_file("pyproject.toml", """\ + self.make_file( + "pyproject.toml", + """\ [tool.coverage.run] data_file = "$DATA_FILE.fooey" branch = "$BRANCH" @@ -285,7 +349,8 @@ def test_environment_vars_in_toml_config(self) -> None: # This reproduces the failure from https://github.com/nedbat/coveragepy/issues/1481 # When OTHER has a backslash that isn't a valid escape, like \\z (see below). something = "if [ $OTHER ]; then printf '%s\\n' 'Hi'; fi" - """) + """, + ) self.set_environ("BRANCH", "true") self.set_environ("DIGITS", "3") self.set_environ("FAIL_UNDER", "90.5") @@ -300,7 +365,9 @@ def test_environment_vars_in_toml_config(self) -> None: def test_tilde_in_config(self) -> None: # Config entries that are file paths can be tilde-expanded. - self.make_file(".coveragerc", """\ + self.make_file( + ".coveragerc", + """\ [run] data_file = ~/data.file @@ -326,13 +393,16 @@ def test_tilde_in_config(self) -> None: mapping = ~/src ~joe/source - """) + """, + ) self.assert_tilde_results() def test_tilde_in_toml_config(self) -> None: # Config entries that are file paths can be tilde-expanded. - self.make_file("pyproject.toml", """\ + self.make_file( + "pyproject.toml", + """\ [tool.coverage.run] data_file = "~/data.file" @@ -360,12 +430,14 @@ def test_tilde_in_toml_config(self) -> None: "~/src", "~joe/source", ] - """) + """, + ) self.assert_tilde_results() def assert_tilde_results(self) -> None: """Common assertions for two tilde tests.""" + def expanduser(s: str) -> str: """Fake tilde expansion""" s = s.replace("~/", "/Users/me/") @@ -374,8 +446,8 @@ def expanduser(s: str) -> str: with mock.patch.object( coverage.config.os.path, # type: ignore[attr-defined] - 'expanduser', - new=expanduser + "expanduser", + new=expanduser, ): cov = coverage.Coverage() assert cov.config.data_file == "/Users/me/data.file" @@ -384,7 +456,7 @@ def expanduser(s: str) -> str: assert cov.config.lcov_output == "/Users/me/lcov/~foo.lcov" assert cov.config.xml_output == "/Users/me/somewhere/xml.out" assert cov.config.exclude_list == ["~/data.file", "~joe/html_dir"] - assert cov.config.paths == {'mapping': ['/Users/me/src', '/Users/joe/source']} + assert cov.config.paths == {"mapping": ["/Users/me/src", "/Users/joe/source"]} def test_tweaks_after_constructor(self) -> None: # set_option can be used after construction to affect the config. 
@@ -400,7 +472,9 @@ def test_tweaks_after_constructor(self) -> None: assert cov.get_option("run:data_file") == "fooey.dat" def test_tweaks_paths_after_constructor(self) -> None: - self.make_file(".coveragerc", """\ + self.make_file( + ".coveragerc", + """\ [paths] first = /first/1 @@ -409,7 +483,8 @@ def test_tweaks_paths_after_constructor(self) -> None: second = /second/a /second/b - """) + """, + ) old_paths = { "first": ["/first/1", "/first/2"], "second": ["/second/a", "/second/b"], @@ -452,59 +527,77 @@ def test_tweak_plugin_options(self) -> None: _ = cov.get_option("no_such.plugin:foo") def test_unknown_option(self) -> None: - self.make_file(".coveragerc", """\ + self.make_file( + ".coveragerc", + """\ [run] xyzzy = 17 - """) + """, + ) msg = r"Unrecognized option '\[run\] xyzzy=' in config file .coveragerc" with pytest.warns(CoverageWarning, match=msg): _ = coverage.Coverage() def test_unknown_option_toml(self) -> None: - self.make_file("pyproject.toml", """\ + self.make_file( + "pyproject.toml", + """\ [tool.coverage.run] xyzzy = 17 - """) + """, + ) msg = r"Unrecognized option '\[tool.coverage.run\] xyzzy=' in config file pyproject.toml" with pytest.warns(CoverageWarning, match=msg): _ = coverage.Coverage() def test_unknown_patch(self) -> None: self.make_file("foo.py", "a = 1") - self.make_file(".coveragerc", """\ + self.make_file( + ".coveragerc", + """\ [run] patch = _exit xyzzy - """) + """, + ) msg = "Unknown patch 'xyzzy'" with pytest.raises(ConfigError, match=msg): cov = coverage.Coverage() self.start_import_stop(cov, "foo") def test_misplaced_option(self) -> None: - self.make_file(".coveragerc", """\ + self.make_file( + ".coveragerc", + """\ [report] branch = True - """) + """, + ) msg = r"Unrecognized option '\[report\] branch=' in config file .coveragerc" with pytest.warns(CoverageWarning, match=msg): _ = coverage.Coverage() def test_unknown_option_in_other_ini_file(self) -> None: - self.make_file("setup.cfg", """\ + self.make_file( + "setup.cfg", + """\ [coverage:run] huh = what? 
- """) + """, + ) msg = r"Unrecognized option '\[coverage:run\] huh=' in config file setup.cfg" with pytest.warns(CoverageWarning, match=msg): _ = coverage.Coverage() def test_exceptions_from_missing_things(self) -> None: - self.make_file("config.ini", """\ + self.make_file( + "config.ini", + """\ [run] branch = True - """) + """, + ) config = HandyConfigParser(True) config.read(["config.ini"]) with pytest.raises(ConfigError, match="No section: 'xyzzy'"): @@ -513,20 +606,26 @@ def test_exceptions_from_missing_things(self) -> None: config.get("xyzzy", "foo") def test_exclude_also(self) -> None: - self.make_file("pyproject.toml", """\ + self.make_file( + "pyproject.toml", + """\ [tool.coverage.report] exclude_also = ["foobar", "raise .*Error"] - """) + """, + ) cov = coverage.Coverage() expected = coverage.config.DEFAULT_EXCLUDE + ["foobar", "raise .*Error"] assert cov.config.exclude_list == expected def test_partial_also(self) -> None: - self.make_file("pyproject.toml", """\ + self.make_file( + "pyproject.toml", + """\ [tool.coverage.report] partial_also = ["foobar", "raise .*Error"] - """) + """, + ) cov = coverage.Coverage() expected = coverage.config.DEFAULT_PARTIAL + ["foobar", "raise .*Error"] @@ -539,18 +638,24 @@ def test_core_option(self) -> None: default_core = cov.config.core core_to_set = "ctrace" if default_core == "pytrace" else "pytrace" - self.make_file(".coveragerc", f"""\ + self.make_file( + ".coveragerc", + f"""\ [run] core = {core_to_set} - """) + """, + ) cov = coverage.Coverage() assert cov.config.core == core_to_set os.remove(".coveragerc") - self.make_file("pyproject.toml", f"""\ + self.make_file( + "pyproject.toml", + f"""\ [tool.coverage.run] core = "{core_to_set}" - """) + """, + ) cov = coverage.Coverage() assert cov.config.core == core_to_set @@ -693,13 +798,13 @@ def assert_config_settings_are_correct(self, cov: Coverage) -> None: assert cov.config.xml_package_depth == 17 assert cov.config.paths == { - 'source': ['.', '/home/ned/src/'], - 'other': ['other', '/home/ned/other', 'c:\\Ned\\etc'], + "source": [".", "/home/ned/src/"], + "other": ["other", "/home/ned/other", "c:\\Ned\\etc"], } assert cov.config.get_plugin_options("plugins.a_plugin") == { - 'hello': 'world', - 'names': 'Jane/John/Jenny', + "hello": "world", + "names": "Jane/John/Jenny", } assert cov.config.get_plugin_options("plugins.another") == {} assert cov.config.json_show_contexts is True @@ -739,15 +844,21 @@ def test_config_file_settings_in_tox_if_coveragerc_specified(self) -> None: def check_other_not_read_if_coveragerc(self, fname: str) -> None: """Check config `fname` is not read if .coveragerc exists.""" - self.make_file(".coveragerc", """\ + self.make_file( + ".coveragerc", + """\ [run] include = foo - """) - self.make_file(fname, """\ + """, + ) + self.make_file( + fname, + """\ [coverage:run] omit = bar branch = true - """) + """, + ) cov = coverage.Coverage() assert cov.config.run_include == ["foo"] assert cov.config.run_omit == [] @@ -761,11 +872,14 @@ def test_toxini_only_if_not_coveragerc(self) -> None: def check_other_config_need_prefixes(self, fname: str) -> None: """Check that `fname` sections won't be read if un-prefixed.""" - self.make_file(fname, """\ + self.make_file( + fname, + """\ [run] omit = bar branch = true - """) + """, + ) cov = coverage.Coverage() assert cov.config.run_omit == [] assert cov.config.branch is False @@ -793,7 +907,9 @@ def test_read_prefixed_sections_from_explicit_file(self) -> None: self.assert_config_settings_are_correct(cov) def test_non_ascii(self) -> 
None: - self.make_file(".coveragerc", """\ + self.make_file( + ".coveragerc", + """\ [report] exclude_lines = first @@ -801,7 +917,8 @@ def test_non_ascii(self) -> None: third [html] title = tabblo & ÂĢĪ„ÎąĐ‘ĐŦâ„“ĪƒÂģ # numbers - """) + """, + ) self.set_environ("TOX_ENVNAME", "weirdo") cov = coverage.Coverage() @@ -841,11 +958,14 @@ def test_no_toml_installed_explicit_toml(self) -> None: @pytest.mark.skipif(env.PYVERSION >= (3, 11), reason="Python 3.11 has toml in stdlib") def test_no_toml_installed_pyproject_toml(self) -> None: # Can't have coverage config in pyproject.toml without toml installed. - self.make_file("pyproject.toml", """\ + self.make_file( + "pyproject.toml", + """\ # A toml file! [tool.coverage.run] xyzzy = 17 - """) + """, + ) with mock.patch.object(coverage.tomlconfig, "has_tomllib", False): msg = "Can't read 'pyproject.toml' without TOML support" with pytest.raises(ConfigError, match=msg): @@ -854,11 +974,14 @@ def test_no_toml_installed_pyproject_toml(self) -> None: @pytest.mark.skipif(env.PYVERSION >= (3, 11), reason="Python 3.11 has toml in stdlib") def test_no_toml_installed_pyproject_toml_shorter_syntax(self) -> None: # Can't have coverage config in pyproject.toml without toml installed. - self.make_file("pyproject.toml", """\ + self.make_file( + "pyproject.toml", + """\ # A toml file! [tool.coverage] run.parallel = true - """) + """, + ) with mock.patch.object(coverage.tomlconfig, "has_tomllib", False): msg = "Can't read 'pyproject.toml' without TOML support" with pytest.raises(ConfigError, match=msg): @@ -867,11 +990,14 @@ def test_no_toml_installed_pyproject_toml_shorter_syntax(self) -> None: @pytest.mark.skipif(env.PYVERSION >= (3, 11), reason="Python 3.11 has toml in stdlib") def test_no_toml_installed_pyproject_no_coverage(self) -> None: # It's ok to have non-coverage pyproject.toml without toml installed. - self.make_file("pyproject.toml", """\ + self.make_file( + "pyproject.toml", + """\ # A toml file! 
[tool.something] xyzzy = 17 - """) + """, + ) with mock.patch.object(coverage.tomlconfig, "has_tomllib", False): cov = coverage.Coverage() # We get default settings: @@ -880,10 +1006,13 @@ def test_no_toml_installed_pyproject_no_coverage(self) -> None: assert cov.config.data_file == ".coverage" def test_exceptions_from_missing_toml_things(self) -> None: - self.make_file("pyproject.toml", """\ + self.make_file( + "pyproject.toml", + """\ [tool.coverage.run] branch = true - """) + """, + ) config = TomlConfigParser(False) config.read("pyproject.toml") with pytest.raises(ConfigError, match="No section: 'xyzzy'"): diff --git a/tests/test_context.py b/tests/test_context.py index 5c0618c02..2a719e04f 100644 --- a/tests/test_context.py +++ b/tests/test_context.py @@ -72,23 +72,23 @@ def test_combining_line_contexts(self) -> None: for data in datas: combined.update(data) - assert combined.measured_contexts() == {'red', 'blue'} + assert combined.measured_contexts() == {"red", "blue"} full_names = {os.path.basename(f): f for f in combined.measured_files()} - assert_count_equal(full_names, ['red.py', 'blue.py']) + assert_count_equal(full_names, ["red.py", "blue.py"]) - fred = full_names['red.py'] - fblue = full_names['blue.py'] + fred = full_names["red.py"] + fblue = full_names["blue.py"] def assert_combined_lines(filename: str, context: str, lines: list[TLineNo]) -> None: # pylint: disable=cell-var-from-loop combined.set_query_context(context) assert combined.lines(filename) == lines - assert_combined_lines(fred, 'red', self.LINES) - assert_combined_lines(fred, 'blue', []) - assert_combined_lines(fblue, 'red', []) - assert_combined_lines(fblue, 'blue', self.LINES) + assert_combined_lines(fred, "red", self.LINES) + assert_combined_lines(fred, "blue", []) + assert_combined_lines(fblue, "red", []) + assert_combined_lines(fblue, "blue", self.LINES) def test_combining_arc_contexts(self) -> None: red_data, blue_data = self.run_red_blue(branch=True) @@ -105,33 +105,33 @@ def test_combining_arc_contexts(self) -> None: for data in datas: combined.update(data) - assert combined.measured_contexts() == {'red', 'blue'} + assert combined.measured_contexts() == {"red", "blue"} full_names = {os.path.basename(f): f for f in combined.measured_files()} - assert_count_equal(full_names, ['red.py', 'blue.py']) + assert_count_equal(full_names, ["red.py", "blue.py"]) - fred = full_names['red.py'] - fblue = full_names['blue.py'] + fred = full_names["red.py"] + fblue = full_names["blue.py"] def assert_combined_lines(filename: str, context: str, lines: list[TLineNo]) -> None: # pylint: disable=cell-var-from-loop combined.set_query_context(context) assert combined.lines(filename) == lines - assert_combined_lines(fred, 'red', self.LINES) - assert_combined_lines(fred, 'blue', []) - assert_combined_lines(fblue, 'red', []) - assert_combined_lines(fblue, 'blue', self.LINES) + assert_combined_lines(fred, "red", self.LINES) + assert_combined_lines(fred, "blue", []) + assert_combined_lines(fblue, "red", []) + assert_combined_lines(fblue, "blue", self.LINES) def assert_combined_arcs(filename: str, context: str, lines: list[TArc]) -> None: # pylint: disable=cell-var-from-loop combined.set_query_context(context) assert combined.arcs(filename) == lines - assert_combined_arcs(fred, 'red', arc_data) - assert_combined_arcs(fred, 'blue', []) - assert_combined_arcs(fblue, 'red', []) - assert_combined_arcs(fblue, 'blue', arc_data) + assert_combined_arcs(fred, "red", arc_data) + assert_combined_arcs(fred, "blue", []) + 
assert_combined_arcs(fblue, "red", []) + assert_combined_arcs(fblue, "blue", arc_data) @pytest.mark.skipif(not testenv.DYN_CONTEXTS, reason="No dynamic contexts with this core") @@ -216,12 +216,14 @@ def get_qualname() -> str | None: if any(sinfo[0].f_code.co_name == "get_qualname" for sinfo in stack): # We're calling ourselves recursively, maybe because we're testing # properties. Return an int to try to get back on track. - return 17 # type: ignore[return-value] + return 17 # type: ignore[return-value] caller_frame = stack[0][0] return qualname_from_frame(caller_frame) + # pylint: disable=missing-class-docstring, missing-function-docstring, unused-argument + class Parent: def meth(self) -> str | None: return get_qualname() @@ -230,27 +232,35 @@ def meth(self) -> str | None: def a_property(self) -> str | None: return get_qualname() + class Child(Parent): pass + class SomethingElse: pass + class MultiChild(SomethingElse, Child): pass + def no_arguments() -> str | None: return get_qualname() + def plain_old_function(a: Any, b: Any) -> str | None: return get_qualname() + def fake_out(self: Any) -> str | None: return get_qualname() + def patch_meth(self: Any) -> str | None: return get_qualname() + # pylint: enable=missing-class-docstring, missing-function-docstring, unused-argument @@ -285,12 +295,12 @@ def test_property(self) -> None: def test_changeling(self) -> None: c = Child() - c.meth = patch_meth # type: ignore[assignment] - assert c.meth(c) == "tests.test_context.patch_meth" # type: ignore[call-arg] + c.meth = patch_meth # type: ignore[assignment] + assert c.meth(c) == "tests.test_context.patch_meth" # type: ignore[call-arg] def test_bug_829(self) -> None: # A class with a name like a function shouldn't confuse qualname_from_frame. - class test_something: # pylint: disable=unused-variable + class test_something: # pylint: disable=unused-variable assert get_qualname() is None def test_bug_1210(self) -> None: diff --git a/tests/test_coverage.py b/tests/test_coverage.py index 194a2e120..fbfd9395a 100644 --- a/tests/test_coverage.py +++ b/tests/test_coverage.py @@ -19,33 +19,37 @@ class TestCoverageTest(CoverageTest): def test_successful_coverage(self) -> None: # The simplest run possible. - self.check_coverage("""\ + self.check_coverage( + """\ a = 1 b = 2 """, - lines=[1,2], + lines=[1, 2], ) # You can provide a list of possible statement matches. - self.check_coverage("""\ + self.check_coverage( + """\ a = 1 b = 2 """, - lines=([100], [1,2], [1723,47]), + lines=([100], [1, 2], [1723, 47]), ) # You can specify missing lines. - self.check_coverage("""\ + self.check_coverage( + """\ a = 1 if a == 2: a = 3 """, - lines=[1,2,3], + lines=[1, 2, 3], missing="3", ) def test_failed_coverage(self) -> None: # If the lines are wrong, the message shows right and wrong. with pytest.raises(AssertionError, match=r"\[1, 2] != \[1]"): - self.check_coverage("""\ + self.check_coverage( + """\ a = 1 b = 2 """, @@ -54,7 +58,8 @@ def test_failed_coverage(self) -> None: # If the list of lines possibilities is wrong, the msg shows right. msg = r"None of the lines choices matched \[1, 2]" with pytest.raises(AssertionError, match=msg): - self.check_coverage("""\ + self.check_coverage( + """\ a = 1 b = 2 """, @@ -62,26 +67,29 @@ def test_failed_coverage(self) -> None: ) # If the missing lines are wrong, the message shows right and wrong. 
with pytest.raises(AssertionError, match=r"'3' != '37'"): - self.check_coverage("""\ + self.check_coverage( + """\ a = 1 if a == 2: a = 3 """, - lines=[1,2,3], + lines=[1, 2, 3], missing="37", ) def test_exceptions_really_fail(self) -> None: # An assert in the checked code will really raise up to us. with pytest.raises(AssertionError, match="This is bad"): - self.check_coverage("""\ + self.check_coverage( + """\ a = 1 assert a == 99, "This is bad" """, ) # Other exceptions too. with pytest.raises(ZeroDivisionError, match="division"): - self.check_coverage("""\ + self.check_coverage( + """\ a = 1 assert a == 1, "This is good" a/0 @@ -93,7 +101,8 @@ class BasicCoverageTest(CoverageTest): """The simplest tests, for quick smoke testing of fundamental changes.""" def test_simple(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a = 1 b = 2 @@ -101,23 +110,25 @@ def test_simple(self) -> None: # Nothing here d = 6 """, - lines=[1,2,4,6], + lines=[1, 2, 4, 6], report="4 0 0 0 100%", ) def test_indentation_wackiness(self) -> None: # Partial final lines are OK. - self.check_coverage("""\ + self.check_coverage( + """\ import sys if not sys.path: a = 1 - """, # indented last line - lines=[1,2,3], + """, # indented last line + lines=[1, 2, 3], missing="3", ) def test_multiline_initializer(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ d = { 'foo': 1+2, 'bar': (lambda x: x+1)(1), @@ -126,19 +137,20 @@ def test_multiline_initializer(self) -> None: e = { 'foo': 1, 'bar': 2 } """, - lines=[1,7], + lines=[1, 7], missing="", ) def test_list_comprehension(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ l = [ 2*i for i in range(10) if i > 5 ] assert l == [12, 14, 16, 18] """, - lines=[1,5], + lines=[1, 5], missing="", ) @@ -150,41 +162,46 @@ def test_expression(self) -> None: # Bare expressions as statements are tricky: some implementations # optimize some of them away. All implementations seem to count # the implicit return at the end as executable. 
- self.check_coverage("""\ + self.check_coverage( + """\ 12 23 """, - lines=([1,2],[2]), + lines=([1, 2], [2]), missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ 12 23 a = 3 """, - lines=([1,2,3],[3]), + lines=([1, 2, 3], [3]), missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ 1 + 2 1 + \\ 2 """, - lines=([1,2], [2]), + lines=([1, 2], [2]), missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ 1 + 2 1 + \\ 2 a = 4 """, - lines=([1,2,4], [4]), + lines=([1, 2, 4], [4]), missing="", ) def test_assert(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ assert (1 + 2) assert (1 + 2) @@ -193,35 +210,38 @@ def test_assert(self) -> None: 2), \\ 'something is amiss' """, - lines=[1,2,4,5], + lines=[1, 2, 4, 5], missing="", ) def test_assignment(self) -> None: # Simple variable assignment - self.check_coverage("""\ + self.check_coverage( + """\ a = (1 + 2) b = (1 + 2) c = \\ 1 """, - lines=[1,2,4], + lines=[1, 2, 4], missing="", ) def test_assign_tuple(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a = 1 a,b,c = 7,8,9 assert a == 7 and b == 8 and c == 9 """, - lines=[1,2,3], + lines=[1, 2, 3], missing="", ) def test_more_assignments(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ x = [] d = {} d[ @@ -239,7 +259,8 @@ def test_more_assignments(self) -> None: def test_attribute_assignment(self) -> None: # Attribute assignment - self.check_coverage("""\ + self.check_coverage( + """\ class obj: pass o = obj() o.foo = (1 + 2) @@ -248,12 +269,13 @@ class obj: pass o.foo = \\ 1 """, - lines=[1,2,3,4,6], + lines=[1, 2, 3, 4, 6], missing="", ) def test_list_of_attribute_assignment(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ class obj: pass o = obj() o.a, o.b = (1 + 2), 3 @@ -264,12 +286,13 @@ class obj: pass 1, \\ 2 """, - lines=[1,2,3,4,7], + lines=[1, 2, 3, 4, 7], missing="", ) def test_augmented_assignment(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a = 1 a += 1 a += (1 + @@ -277,12 +300,13 @@ def test_augmented_assignment(self) -> None: a += \\ 1 """, - lines=[1,2,3,5], + lines=[1, 2, 3, 5], missing="", ) def test_triple_string_stuff(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a = ''' a multiline string. @@ -301,59 +325,65 @@ def test_triple_string_stuff(self) -> None: lines. ''') """, - lines=[1,5,11], + lines=[1, 5, 11], missing="", ) def test_pass(self) -> None: # pass is tricky: if it's the only statement in a block, then it is # "executed". But if it is not the only statement, then it is not. - self.check_coverage("""\ + self.check_coverage( + """\ if 1==1: pass """, - lines=[1,2], + lines=[1, 2], missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ def foo(): pass foo() """, - lines=[1,2,3], + lines=[1, 2, 3], missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ def foo(): "doc" pass foo() """, - lines=[1,3,4], + lines=[1, 3, 4], missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ class Foo: def foo(self): pass Foo().foo() """, - lines=[1,2,3,4], + lines=[1, 2, 3, 4], missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ class Foo: def foo(self): "Huh?" 
pass Foo().foo() """, - lines=[1,2,4,5], + lines=[1, 2, 4, 5], missing="", ) def test_del(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ d = { 'a': 1, 'b': 1, 'c': 1, 'd': 1, 'e': 1 } del d['a'] del d[ @@ -364,12 +394,13 @@ def test_del(self) -> None: d['e'] assert(len(d.keys()) == 0) """, - lines=[1,2,3,6,9], + lines=[1, 2, 3, 6, 9], missing="", ) def test_raise(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ try: raise Exception( "hello %d" % @@ -377,18 +408,19 @@ def test_raise(self) -> None: except: pass """, - lines=[1,2,5,6], + lines=[1, 2, 5, 6], missing="", ) def test_raise_followed_by_statement(self) -> None: if env.PYBEHAVIOR.omit_after_jump: - lines = [1,2,4,5] + lines = [1, 2, 4, 5] missing = "" else: - lines = [1,2,3,4,5] + lines = [1, 2, 3, 4, 5] missing = "3" - self.check_coverage("""\ + self.check_coverage( + """\ try: raise Exception("hello") a = 3 @@ -400,7 +432,8 @@ def test_raise_followed_by_statement(self) -> None: ) def test_return(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ def fn(): a = 1 return a @@ -408,10 +441,11 @@ def fn(): x = fn() assert(x == 1) """, - lines=[1,2,3,5,6], + lines=[1, 2, 3, 5, 6], missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ def fn(): a = 1 return ( @@ -421,10 +455,11 @@ def fn(): x = fn() assert(x == 2) """, - lines=[1,2,3,7,8], + lines=[1, 2, 3, 7, 8], missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ def fn(): a = 1 return (a, @@ -434,18 +469,19 @@ def fn(): x,y,z = fn() assert x == 1 and y == 2 and z == 3 """, - lines=[1,2,3,7,8], + lines=[1, 2, 3, 7, 8], missing="", ) def test_return_followed_by_statement(self) -> None: if env.PYBEHAVIOR.omit_after_return: - lines = [1,2,3,6,7] + lines = [1, 2, 3, 6, 7] missing = "" else: - lines = [1,2,3,4,6,7] + lines = [1, 2, 3, 4, 6, 7] missing = "4" - self.check_coverage("""\ + self.check_coverage( + """\ def fn(): a = 2 return a @@ -459,7 +495,8 @@ def fn(): ) def test_yield(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ def gen(): yield 1 yield (2+ @@ -470,44 +507,48 @@ def gen(): a,b,c = gen() assert a == 1 and b == 9 and c == (1,2) """, - lines=[1,2,3,6,8,9], + lines=[1, 2, 3, 6, 8, 9], missing="", ) def test_break(self) -> None: if env.PYBEHAVIOR.omit_after_jump: - lines = [1,2,3,5] + lines = [1, 2, 3, 5] missing = "" else: - lines = [1,2,3,4,5] + lines = [1, 2, 3, 4, 5] missing = "4" - self.check_coverage("""\ + self.check_coverage( + """\ for x in range(10): a = 2 + x break a = 4 assert a == 2 """, - lines=lines, missing=missing, + lines=lines, + missing=missing, ) def test_continue(self) -> None: if env.PYBEHAVIOR.omit_after_jump: - lines = [1,2,3,5] + lines = [1, 2, 3, 5] missing = "" else: - lines = [1,2,3,4,5] + lines = [1, 2, 3, 4, 5] missing = "4" - self.check_coverage("""\ + self.check_coverage( + """\ for x in range(10): a = 2 + x continue a = 4 assert a == 11 """, - lines=lines, missing=missing, + lines=lines, + missing=missing, ) def test_strange_unexecuted_continue(self) -> None: @@ -515,7 +556,8 @@ def test_strange_unexecuted_continue(self) -> None: # Peephole optimization of jumps to jumps can mean that some statements # never hit the line tracer. The behavior is different in different # versions of Python, so be careful when running this test. 
- self.check_coverage("""\ + self.check_coverage( + """\ a = b = c = 0 for n in range(100): if n % 2: @@ -538,29 +580,32 @@ def test_strange_unexecuted_continue(self) -> None: c += 1 assert a == 33 and b == 50 and c == 50 """, - lines=[1,2,3,4,5,6,8,9,10, 12,13,14,15,16,17,19,20,21], + lines=[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 16, 17, 19, 20, 21], missing="", ) def test_import(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ import string from sys import path a = 1 """, - lines=[1,2,3], + lines=[1, 2, 3], missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ import string if 1 == 2: from sys import path a = 1 """, - lines=[1,2,3,4], + lines=[1, 2, 3, 4], missing="3", ) - self.check_coverage("""\ + self.check_coverage( + """\ import string, \\ os, \\ re @@ -568,43 +613,48 @@ def test_import(self) -> None: stdout a = 1 """, - lines=[1,4,6], + lines=[1, 4, 6], missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ import sys, sys as s assert s.path == sys.path """, - lines=[1,2], + lines=[1, 2], missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ import sys, \\ sys as s assert s.path == sys.path """, - lines=[1,3], + lines=[1, 3], missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ from sys import path, \\ path as p assert p == path """, - lines=[1,3], + lines=[1, 3], missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ from sys import \\ * assert len(path) > 0 """, - lines=[1,3], + lines=[1, 3], missing="", ) def test_global(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ g = h = i = 1 def fn(): global g @@ -614,22 +664,24 @@ def fn(): fn() assert g == 2 and h == 2 and i == 2 """, - lines=[1,2,6,7,8], + lines=[1, 2, 6, 7, 8], missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ g = h = i = 1 def fn(): global g; g = 2 fn() assert g == 2 and h == 1 and i == 1 """, - lines=[1,2,3,4,5], + lines=[1, 2, 3, 4, 5], missing="", ) def test_exec(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a = b = c = 1 exec("a = 2") exec("b = " + @@ -637,10 +689,11 @@ def test_exec(self) -> None: "2") assert a == 2 and b == 2 and c == 2 """, - lines=[1,2,3,6], + lines=[1, 2, 3, 6], missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ vars = {'a': 1, 'b': 1, 'c': 1} exec("a = 2", vars) exec("b = " + @@ -648,10 +701,11 @@ def test_exec(self) -> None: "2", vars) assert vars['a'] == 2 and vars['b'] == 2 and vars['c'] == 2 """, - lines=[1,2,3,6], + lines=[1, 2, 3, 6], missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ globs = {} locs = {'a': 1, 'b': 1, 'c': 1} exec("a = 2", globs, locs) @@ -660,21 +714,23 @@ def test_exec(self) -> None: "2", globs, locs) assert locs['a'] == 2 and locs['b'] == 2 and locs['c'] == 2 """, - lines=[1,2,3,4,7], + lines=[1, 2, 3, 4, 7], missing="", ) def test_extra_doc_string(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a = 1 "An extra docstring, should be a comment." b = 3 assert (a,b) == (1,3) """, - lines=([1,3,4], [1,2,3,4]), + lines=([1, 3, 4], [1, 2, 3, 4]), missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ a = 1 "An extra docstring, should be a comment." 
b = 3 @@ -683,12 +739,13 @@ def test_extra_doc_string(self) -> None: c = 6 assert (a,b,c) == (1,3,6) """, - lines=([1,3,6,7], [1,2,3,4,5,6,7]), + lines=([1, 3, 6, 7], [1, 2, 3, 4, 5, 6, 7]), missing="", ) def test_nonascii(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ # coding: utf-8 a = 2 b = 3 @@ -697,14 +754,16 @@ def test_nonascii(self) -> None: ) def test_module_docstring(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ '''I am a module docstring.''' a = 2 b = 3 """, lines=[2, 3], ) - self.check_coverage("""\ + self.check_coverage( + """\ # Start with a comment, even though it doesn't change the behavior. '''I am a module docstring.''' a = 3 @@ -718,19 +777,21 @@ class CompoundStatementTest(CoverageTest): """Testing coverage of multi-line compound statements.""" def test_statement_list(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a = 1; b = 2; c = 3 d = 4; e = 5; assert (a,b,c,d,e) == (1,2,3,4,5) """, - lines=[1,2,3,5], + lines=[1, 2, 3, 5], missing="", ) def test_if(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a = 1 if a == 1: x = 3 @@ -740,10 +801,11 @@ def test_if(self) -> None: x = 7 assert x == 7 """, - lines=[1,2,3,4,5,7,8], + lines=[1, 2, 3, 4, 5, 7, 8], missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ a = 1 if a == 1: x = 3 @@ -751,10 +813,11 @@ def test_if(self) -> None: y = 5 assert x == 3 """, - lines=[1,2,3,5,6], + lines=[1, 2, 3, 5, 6], missing="5", ) - self.check_coverage("""\ + self.check_coverage( + """\ a = 1 if a != 1: x = 3 @@ -762,10 +825,11 @@ def test_if(self) -> None: y = 5 assert y == 5 """, - lines=[1,2,3,5,6], + lines=[1, 2, 3, 5, 6], missing="3", ) - self.check_coverage("""\ + self.check_coverage( + """\ a = 1; b = 2 if a == 1: if b == 2: @@ -776,12 +840,13 @@ def test_if(self) -> None: z = 8 assert x == 4 """, - lines=[1,2,3,4,6,8,9], + lines=[1, 2, 3, 4, 6, 8, 9], missing="6-8", ) def test_elif(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a = 1; b = 2; c = 3; if a == 1: x = 3 @@ -791,11 +856,12 @@ def test_elif(self) -> None: z = 7 assert x == 3 """, - lines=[1,2,3,4,5,7,8], + lines=[1, 2, 3, 4, 5, 7, 8], missing="4-7", report="7 3 4 1 45% 4-7", ) - self.check_coverage("""\ + self.check_coverage( + """\ a = 1; b = 2; c = 3; if a != 1: x = 3 @@ -805,11 +871,12 @@ def test_elif(self) -> None: z = 7 assert y == 5 """, - lines=[1,2,3,4,5,7,8], + lines=[1, 2, 3, 4, 5, 7, 8], missing="3, 7", report="7 2 4 2 64% 3, 7", ) - self.check_coverage("""\ + self.check_coverage( + """\ a = 1; b = 2; c = 3; if a != 1: x = 3 @@ -819,13 +886,14 @@ def test_elif(self) -> None: z = 7 assert z == 7 """, - lines=[1,2,3,4,5,7,8], + lines=[1, 2, 3, 4, 5, 7, 8], missing="3, 5", report="7 2 4 2 64% 3, 5", ) def test_elif_no_else(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a = 1; b = 2; c = 3; if a == 1: x = 3 @@ -833,11 +901,12 @@ def test_elif_no_else(self) -> None: y = 5 assert x == 3 """, - lines=[1,2,3,4,5,6], + lines=[1, 2, 3, 4, 5, 6], missing="4-5", report="6 2 4 1 50% 4-5", ) - self.check_coverage("""\ + self.check_coverage( + """\ a = 1; b = 2; c = 3; if a != 1: x = 3 @@ -845,13 +914,14 @@ def test_elif_no_else(self) -> None: y = 5 assert y == 5 """, - lines=[1,2,3,4,5,6], + lines=[1, 2, 3, 4, 5, 6], missing="3", report="6 1 4 2 70% 3, 4->6", ) def test_elif_bizarre(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ def f(self): if self==1: x = 3 @@ -866,12 +936,13 @@ def 
f(self): else: x = 13 """, - lines=[1,2,3,4,5,6,7,8,9,10,11,13], + lines=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13], missing="2-13", ) def test_split_if(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a = 1; b = 2; c = 3; if \\ a == 1: @@ -883,10 +954,11 @@ def test_split_if(self) -> None: z = 7 assert x == 3 """, - lines=[1,2,4,5,7,9,10], + lines=[1, 2, 4, 5, 7, 9, 10], missing="5-9", ) - self.check_coverage("""\ + self.check_coverage( + """\ a = 1; b = 2; c = 3; if \\ a != 1: @@ -898,10 +970,11 @@ def test_split_if(self) -> None: z = 7 assert y == 5 """, - lines=[1,2,4,5,7,9,10], + lines=[1, 2, 4, 5, 7, 9, 10], missing="4, 9", ) - self.check_coverage("""\ + self.check_coverage( + """\ a = 1; b = 2; c = 3; if \\ a != 1: @@ -913,12 +986,13 @@ def test_split_if(self) -> None: z = 7 assert z == 7 """, - lines=[1,2,4,5,7,9,10], + lines=[1, 2, 4, 5, 7, 9, 10], missing="4, 7", ) def test_pathological_split_if(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a = 1; b = 2; c = 3; if ( a == 1 @@ -932,10 +1006,11 @@ def test_pathological_split_if(self) -> None: z = 7 assert x == 3 """, - lines=[1,2,5,6,9,11,12], + lines=[1, 2, 5, 6, 9, 11, 12], missing="6-11", ) - self.check_coverage("""\ + self.check_coverage( + """\ a = 1; b = 2; c = 3; if ( a != 1 @@ -949,10 +1024,11 @@ def test_pathological_split_if(self) -> None: z = 7 assert y == 5 """, - lines=[1,2,5,6,9,11,12], + lines=[1, 2, 5, 6, 9, 11, 12], missing="5, 11", ) - self.check_coverage("""\ + self.check_coverage( + """\ a = 1; b = 2; c = 3; if ( a != 1 @@ -966,12 +1042,13 @@ def test_pathological_split_if(self) -> None: z = 7 assert z == 7 """, - lines=[1,2,5,6,9,11,12], + lines=[1, 2, 5, 6, 9, 11, 12], missing="5, 9", ) def test_absurd_split_if(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a = 1; b = 2; c = 3; if a == 1 \\ : @@ -983,10 +1060,11 @@ def test_absurd_split_if(self) -> None: z = 7 assert x == 3 """, - lines=[1,2,4,5,7,9,10], + lines=[1, 2, 4, 5, 7, 9, 10], missing="5-9", ) - self.check_coverage("""\ + self.check_coverage( + """\ a = 1; b = 2; c = 3; if a != 1 \\ : @@ -998,10 +1076,11 @@ def test_absurd_split_if(self) -> None: z = 7 assert y == 5 """, - lines=[1,2,4,5,7,9,10], + lines=[1, 2, 4, 5, 7, 9, 10], missing="4, 9", ) - self.check_coverage("""\ + self.check_coverage( + """\ a = 1; b = 2; c = 3; if a != 1 \\ : @@ -1013,7 +1092,7 @@ def test_absurd_split_if(self) -> None: z = 7 assert z == 7 """, - lines=[1,2,4,5,7,9,10], + lines=[1, 2, 4, 5, 7, 9, 10], missing="4, 7", ) @@ -1022,7 +1101,8 @@ def test_constant_if(self) -> None: lines = [1, 2, 3] else: lines = [2, 3] - self.check_coverage("""\ + self.check_coverage( + """\ if 1: a = 2 assert a == 2 @@ -1032,30 +1112,33 @@ def test_constant_if(self) -> None: ) def test_while(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a = 3; b = 0 while a: b += 1 a -= 1 assert a == 0 and b == 3 """, - lines=[1,2,3,4,5], + lines=[1, 2, 3, 4, 5], missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ a = 3; b = 0 while a: b += 1 break assert a == 3 and b == 1 """, - lines=[1,2,3,4,5], + lines=[1, 2, 3, 4, 5], missing="", ) def test_while_else(self) -> None: # Take the else branch. - self.check_coverage("""\ + self.check_coverage( + """\ a = 3; b = 0 while a: b += 1 @@ -1064,11 +1147,12 @@ def test_while_else(self) -> None: b = 99 assert a == 0 and b == 99 """, - lines=[1,2,3,4,6,7], + lines=[1, 2, 3, 4, 6, 7], missing="", ) # Don't take the else branch. 
- self.check_coverage("""\ + self.check_coverage( + """\ a = 3; b = 0 while a: b += 1 @@ -1078,12 +1162,13 @@ def test_while_else(self) -> None: b = 99 assert a == 2 and b == 1 """, - lines=[1,2,3,4,5,7,8], + lines=[1, 2, 3, 4, 5, 7, 8], missing="7", ) def test_split_while(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a = 3; b = 0 while \\ a: @@ -1091,10 +1176,11 @@ def test_split_while(self) -> None: a -= 1 assert a == 0 and b == 3 """, - lines=[1,2,4,5,6], + lines=[1, 2, 4, 5, 6], missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ a = 3; b = 0 while ( a @@ -1103,21 +1189,23 @@ def test_split_while(self) -> None: a -= 1 assert a == 0 and b == 3 """, - lines=[1,2,5,6,7], + lines=[1, 2, 5, 6, 7], missing="", ) def test_for(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a = 0 for i in [1,2,3,4,5]: a += i assert a == 15 """, - lines=[1,2,3,4], + lines=[1, 2, 3, 4], missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ a = 0 for i in [1, 2,3,4, @@ -1125,22 +1213,24 @@ def test_for(self) -> None: a += i assert a == 15 """, - lines=[1,2,5,6], + lines=[1, 2, 5, 6], missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ a = 0 for i in [1,2,3,4,5]: a += i break assert a == 1 """, - lines=[1,2,3,4,5], + lines=[1, 2, 3, 4, 5], missing="", ) def test_for_else(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a = 0 for i in range(5): a += i+1 @@ -1148,10 +1238,11 @@ def test_for_else(self) -> None: a = 99 assert a == 99 """, - lines=[1,2,3,5,6], + lines=[1, 2, 3, 5, 6], missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ a = 0 for i in range(5): a += i+1 @@ -1160,22 +1251,24 @@ def test_for_else(self) -> None: a = 123 assert a == 1 """, - lines=[1,2,3,4,6,7], + lines=[1, 2, 3, 4, 6, 7], missing="6", ) def test_split_for(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a = 0 for \\ i in [1,2,3,4,5]: a += i assert a == 15 """, - lines=[1,2,4,5], + lines=[1, 2, 4, 5], missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ a = 0 for \\ i in [1, @@ -1184,12 +1277,13 @@ def test_split_for(self) -> None: a += i assert a == 15 """, - lines=[1,2,6,7], + lines=[1, 2, 6, 7], missing="", ) def test_try_except(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a = 0 try: a = 1 @@ -1197,10 +1291,11 @@ def test_try_except(self) -> None: a = 99 assert a == 1 """, - lines=[1,2,3,4,5,6], + lines=[1, 2, 3, 4, 5, 6], missing="4-5", ) - self.check_coverage("""\ + self.check_coverage( + """\ a = 0 try: a = 1 @@ -1209,10 +1304,11 @@ def test_try_except(self) -> None: a = 99 assert a == 99 """, - lines=[1,2,3,4,5,6,7], + lines=[1, 2, 3, 4, 5, 6, 7], missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ a = 0 try: a = 1 @@ -1223,10 +1319,11 @@ def test_try_except(self) -> None: a = 123 assert a == 123 """, - lines=[1,2,3,4,5,6,7,8,9], + lines=[1, 2, 3, 4, 5, 6, 7, 8, 9], missing="6", ) - self.check_coverage("""\ + self.check_coverage( + """\ a = 0 try: a = 1 @@ -1239,10 +1336,11 @@ def test_try_except(self) -> None: a = 123 assert a == 17 """, - lines=[1,2,3,4,5,6,7,8,9,10,11], + lines=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], missing="6, 9-10", ) - self.check_coverage("""\ + self.check_coverage( + """\ a = 0 try: a = 1 @@ -1252,7 +1350,7 @@ def test_try_except(self) -> None: a = 123 assert a == 123 """, - lines=[1,2,3,4,5,7,8], + lines=[1, 2, 3, 4, 5, 7, 8], missing="4-5", branchz="", branchz_missing="", @@ 
-1261,12 +1359,13 @@ def test_try_except(self) -> None: def test_try_except_stranded_else(self) -> None: if env.PYBEHAVIOR.optimize_unreachable_try_else: # The else can't be reached because the try ends with a raise. - lines = [1,2,3,4,5,6,9] + lines = [1, 2, 3, 4, 5, 6, 9] missing = "" else: - lines = [1,2,3,4,5,6,8,9] + lines = [1, 2, 3, 4, 5, 6, 8, 9] missing = "8" - self.check_coverage("""\ + self.check_coverage( + """\ a = 0 try: a = 1 @@ -1279,11 +1378,13 @@ def test_try_except_stranded_else(self) -> None: """, lines=lines, missing=missing, - branchz="", branchz_missing="", + branchz="", + branchz_missing="", ) def test_try_finally(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a = 0 try: a = 1 @@ -1291,10 +1392,11 @@ def test_try_finally(self) -> None: a = 99 assert a == 99 """, - lines=[1,2,3,5,6], + lines=[1, 2, 3, 5, 6], missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ a = 0; b = 0 try: a = 1 @@ -1306,12 +1408,13 @@ def test_try_finally(self) -> None: a = 99 assert a == 99 and b == 123 """, - lines=[1,2,3,4,5,7,8,9,10], + lines=[1, 2, 3, 4, 5, 7, 8, 9, 10], missing="", ) def test_function_def(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a = 99 def foo(): ''' docstring @@ -1321,10 +1424,11 @@ def foo(): a = foo() assert a == 1 """, - lines=[1,2,5,7,8], + lines=[1, 2, 5, 7, 8], missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ def foo( a, b @@ -1336,10 +1440,11 @@ def foo( x = foo(17, 23) assert x == 40 """, - lines=[1,7,9,10], + lines=[1, 7, 9, 10], missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ def foo( a = (lambda x: x*2)(10), b = ( @@ -1354,12 +1459,13 @@ def foo( x = foo() assert x == 22 """, - lines=[1,10,12,13], + lines=[1, 10, 12, 13], missing="", ) def test_class_def(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ # A comment. class theClass: ''' the docstring. @@ -1390,7 +1496,8 @@ def test_attribute_annotation(self) -> None: lines = [1, 3] else: lines = [1, 2, 3] - self.check_coverage("""\ + self.check_coverage( + """\ class X: x: int y = 1 @@ -1400,7 +1507,8 @@ class X: ) def test_attribute_annotation_from_future(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ from __future__ import annotations class X: x: int @@ -1416,7 +1524,8 @@ class ExcludeTest(CoverageTest): def test_default(self) -> None: # A number of forms of pragma comment are accepted. - self.check_coverage("""\ + self.check_coverage( + """\ a = 1 b = 2 # pragma: no cover c = 3 @@ -1446,11 +1555,12 @@ def method25( ): return a[1,...] def f28(): print("(well): ... 
#2 false positive!") """, - lines=[1,3,5,7,9,11,19,24,25] + lines=[1, 3, 5, 7, 9, 11, 19, 24, 25], ) def test_two_excludes(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a = 1; b = 2 if a == 99: @@ -1459,13 +1569,14 @@ def test_two_excludes(self) -> None: c = 6 # -xx assert a == 1 and b == 2 """, - lines=[1,3,5,7], + lines=[1, 3, 5, 7], missing="5", - excludes=['-cc', '-xx'], + excludes=["-cc", "-xx"], ) def test_excluding_elif_suites(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a = 1; b = 2 if 1==1: @@ -1480,13 +1591,14 @@ def test_excluding_elif_suites(self) -> None: b = 12 assert a == 4 and b == 5 and c == 6 """, - lines=[1,3,4,5,6,11,12,13], + lines=[1, 3, 4, 5, 6, 11, 12, 13], missing="11-12", - excludes=['#pragma: NO COVER'], + excludes=["#pragma: NO COVER"], ) def test_excluding_try_except(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a = 0 try: a = 1 @@ -1496,15 +1608,16 @@ def test_excluding_try_except(self) -> None: a = 123 assert a == 123 """, - lines=[1,2,3,7,8], + lines=[1, 2, 3, 7, 8], missing="", - excludes=['#pragma: NO COVER'], + excludes=["#pragma: NO COVER"], branchz="", branchz_missing="", ) def test_excluding_try_except_stranded_else(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a = 0 try: a = 1 @@ -1515,25 +1628,26 @@ def test_excluding_try_except_stranded_else(self) -> None: x = 2 assert a == 99 """, - lines=[1,2,3,4,5,6,9], + lines=[1, 2, 3, 4, 5, 6, 9], missing="", - excludes=['#pragma: NO COVER'], + excludes=["#pragma: NO COVER"], branchz="", branchz_missing="", ) def test_excluded_comprehension_branches(self) -> None: # https://github.com/nedbat/coveragepy/issues/1271 - self.check_coverage("""\ + self.check_coverage( + """\ x, y = [0], [1] if x == [2]: raise NotImplementedError # NOCOVPLZ if all(_ == __ for _, __ in zip(x, y)): raise NotImplementedError # NOCOVPLZ """, - lines=[1,2,4], + lines=[1, 2, 4], missing="", - excludes=['# NOCOVPLZ'], + excludes=["# NOCOVPLZ"], branchz="23 24 45 4.", branchz_missing="", ) @@ -1543,7 +1657,8 @@ class Py24Test(CoverageTest): """Tests of new syntax in Python 2.4.""" def test_function_decorators(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ def require_int(func): def wrapper(arg): assert isinstance(arg, int) @@ -1562,7 +1677,8 @@ def p1(arg): ) def test_function_decorators_with_args(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ def boost_by(extra): def decorator(func): def wrapper(arg): @@ -1581,7 +1697,8 @@ def boosted(arg): ) def test_double_function_decorators(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ def require_int(func): def wrapper(arg): assert isinstance(arg, int) @@ -1618,7 +1735,8 @@ class Py25Test(CoverageTest): """Tests of new syntax in Python 2.5.""" def test_with_statement(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ class Managed: def __enter__(self): desc = "enter" @@ -1638,12 +1756,13 @@ def __exit__(self, type, value, tb): except: desc = "caught" """, - lines=[1,2,3,5,6,8,9,10,11,13,14,15,16,17,18], + lines=[1, 2, 3, 5, 6, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18], missing="", ) def test_try_except_finally(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ a = 0; b = 0 try: a = 1 @@ -1653,12 +1772,13 @@ def test_try_except_finally(self) -> None: b = 2 assert a == 1 and b == 2 """, - lines=[1,2,3,4,5,7,8], + lines=[1, 2, 3, 4, 5, 7, 8], missing="4-5", branchz="", branchz_missing="", 
) - self.check_coverage("""\ + self.check_coverage( + """\ a = 0; b = 0 try: a = 1 @@ -1669,12 +1789,13 @@ def test_try_except_finally(self) -> None: b = 2 assert a == 99 and b == 2 """, - lines=[1,2,3,4,5,6,8,9], + lines=[1, 2, 3, 4, 5, 6, 8, 9], missing="", branchz="", branchz_missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ a = 0; b = 0 try: a = 1 @@ -1687,12 +1808,13 @@ def test_try_except_finally(self) -> None: b = 2 assert a == 123 and b == 2 """, - lines=[1,2,3,4,5,6,7,8,10,11], + lines=[1, 2, 3, 4, 5, 6, 7, 8, 10, 11], missing="6", branchz="", branchz_missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ a = 0; b = 0 try: a = 1 @@ -1707,12 +1829,13 @@ def test_try_except_finally(self) -> None: b = 2 assert a == 17 and b == 2 """, - lines=[1,2,3,4,5,6,7,8,9,10,12,13], + lines=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13], missing="6, 9-10", branchz="", branchz_missing="", ) - self.check_coverage("""\ + self.check_coverage( + """\ a = 0; b = 0 try: a = 1 @@ -1724,7 +1847,7 @@ def test_try_except_finally(self) -> None: b = 2 assert a == 123 and b == 2 """, - lines=[1,2,3,4,5,7,9,10], + lines=[1, 2, 3, 4, 5, 7, 9, 10], missing="4-5", branchz="", branchz_missing="", @@ -1733,12 +1856,13 @@ def test_try_except_finally(self) -> None: def test_try_except_finally_stranded_else(self) -> None: if env.PYBEHAVIOR.optimize_unreachable_try_else: # The else can't be reached because the try ends with a raise. - lines = [1,2,3,4,5,6,10,11] + lines = [1, 2, 3, 4, 5, 6, 10, 11] missing = "" else: - lines = [1,2,3,4,5,6,8,10,11] + lines = [1, 2, 3, 4, 5, 6, 8, 10, 11] missing = "8" - self.check_coverage("""\ + self.check_coverage( + """\ a = 0; b = 0 try: a = 1 @@ -1753,7 +1877,8 @@ def test_try_except_finally_stranded_else(self) -> None: """, lines=lines, missing=missing, - branchz="", branchz_missing="", + branchz="", + branchz_missing="", ) diff --git a/tests/test_data.py b/tests/test_data.py index de89e6f4b..6751d6707 100644 --- a/tests/test_data.py +++ b/tests/test_data.py @@ -29,38 +29,38 @@ LINES_1 = { - 'a.py': {1, 2}, - 'b.py': {3}, + "a.py": {1, 2}, + "b.py": {3}, } -SUMMARY_1 = {'a.py': 2, 'b.py': 1} -MEASURED_FILES_1 = ['a.py', 'b.py'] +SUMMARY_1 = {"a.py": 2, "b.py": 1} +MEASURED_FILES_1 = ["a.py", "b.py"] A_PY_LINES_1 = [1, 2] B_PY_LINES_1 = [3] LINES_2 = { - 'a.py': {1, 5}, - 'c.py': {17}, + "a.py": {1, 5}, + "c.py": {17}, } -SUMMARY_1_2 = {'a.py': 3, 'b.py': 1, 'c.py': 1} -MEASURED_FILES_1_2 = ['a.py', 'b.py', 'c.py'] +SUMMARY_1_2 = {"a.py": 3, "b.py": 1, "c.py": 1} +MEASURED_FILES_1_2 = ["a.py", "b.py", "c.py"] ARCS_3 = { - 'x.py': {(-1, 1), (1, 2), (2, 3), (3, -1)}, - 'y.py': {(-1, 17), (17, 23), (23, -1)}, + "x.py": {(-1, 1), (1, 2), (2, 3), (3, -1)}, + "y.py": {(-1, 17), (17, 23), (23, -1)}, } X_PY_ARCS_3 = [(-1, 1), (1, 2), (2, 3), (3, -1)] Y_PY_ARCS_3 = [(-1, 17), (17, 23), (23, -1)] -SUMMARY_3 = {'x.py': 3, 'y.py': 2} -MEASURED_FILES_3 = ['x.py', 'y.py'] +SUMMARY_3 = {"x.py": 3, "y.py": 2} +MEASURED_FILES_3 = ["x.py", "y.py"] X_PY_LINES_3 = [1, 2, 3] Y_PY_LINES_3 = [17, 23] ARCS_4 = { - 'x.py': {(-1, 2), (2, 5), (5, -1)}, - 'z.py': {(-1, 1000), (1000, -1)}, + "x.py": {(-1, 2), (2, 5), (5, -1)}, + "z.py": {(-1, 1000), (1000, -1)}, } -SUMMARY_3_4 = {'x.py': 4, 'y.py': 2, 'z.py': 1} -MEASURED_FILES_3_4 = ['x.py', 'y.py', 'z.py'] +SUMMARY_3_4 = {"x.py": 4, "y.py": 2, "z.py": 1} +MEASURED_FILES_3_4 = ["x.py", "y.py", "z.py"] def DebugCoverageData(*args: Any, **kwargs: Any) -> CoverageData: @@ -82,11 +82,12 @@ def DebugCoverageData(*args: Any, **kwargs: 
Any) -> CoverageData: # This is just a way to get a mix of debug options across the tests. options.extend(["dataop2", "sqldata"]) debug = DebugControlString(options=options) - return CoverageData(*args, debug=debug, **kwargs) # type: ignore[misc] + return CoverageData(*args, debug=debug, **kwargs) # type: ignore[misc] TCoverageData = Callable[..., CoverageData] + def assert_line_counts( covdata: CoverageData, counts: Mapping[str, int], @@ -95,10 +96,12 @@ def assert_line_counts( """Check that the line_counts of `covdata` is `counts`.""" assert line_counts(covdata, fullpath) == counts + def assert_measured_files(covdata: CoverageData, measured: Iterable[str]) -> None: """Check that `covdata`'s measured files are `measured`.""" assert_count_equal(covdata.measured_files(), measured) + def assert_lines1_data(covdata: CoverageData) -> None: """Check that `covdata` has the data from LINES1.""" assert_line_counts(covdata, SUMMARY_1) @@ -106,6 +109,7 @@ def assert_lines1_data(covdata: CoverageData) -> None: assert_count_equal(covdata.lines("a.py"), A_PY_LINES_1) assert not covdata.has_arcs() + def assert_arcs3_data(covdata: CoverageData) -> None: """Check that `covdata` has the data from ARCS3.""" assert_line_counts(covdata, SUMMARY_3) @@ -119,6 +123,7 @@ def assert_arcs3_data(covdata: CoverageData) -> None: TData = TypeVar("TData", bound=Union[TLineNo, TArc]) + def dicts_from_sets(file_data: dict[str, set[TData]]) -> dict[str, dict[TData, None]]: """Convert a dict of sets into a dict of dicts. @@ -216,47 +221,47 @@ def test_cant_add_lines_with_arcs(self, klass: TCoverageData) -> None: def test_touch_file_with_lines(self) -> None: covdata = DebugCoverageData() covdata.add_lines(LINES_1) - covdata.touch_file('zzz.py') - assert_measured_files(covdata, MEASURED_FILES_1 + ['zzz.py']) + covdata.touch_file("zzz.py") + assert_measured_files(covdata, MEASURED_FILES_1 + ["zzz.py"]) def test_touch_file_with_arcs(self) -> None: covdata = DebugCoverageData() covdata.add_arcs(ARCS_3) - covdata.touch_file('zzz.py') - assert_measured_files(covdata, MEASURED_FILES_3 + ['zzz.py']) + covdata.touch_file("zzz.py") + assert_measured_files(covdata, MEASURED_FILES_3 + ["zzz.py"]) def test_set_query_contexts(self) -> None: covdata = DebugCoverageData() - covdata.set_context('test_a') + covdata.set_context("test_a") covdata.add_lines(LINES_1) - covdata.set_query_contexts(['te.*a']) - assert covdata.lines('a.py') == [1, 2] - covdata.set_query_contexts(['other']) - assert covdata.lines('a.py') == [] + covdata.set_query_contexts(["te.*a"]) + assert covdata.lines("a.py") == [1, 2] + covdata.set_query_contexts(["other"]) + assert covdata.lines("a.py") == [] def test_no_lines_vs_unmeasured_file(self) -> None: covdata = DebugCoverageData() covdata.add_lines(LINES_1) - covdata.touch_file('zzz.py') - assert covdata.lines('zzz.py') == [] - assert covdata.lines('no_such_file.py') is None + covdata.touch_file("zzz.py") + assert covdata.lines("zzz.py") == [] + assert covdata.lines("no_such_file.py") is None def test_lines_with_contexts(self) -> None: covdata = DebugCoverageData() - covdata.set_context('test_a') + covdata.set_context("test_a") covdata.add_lines(LINES_1) - assert covdata.lines('a.py') == [1, 2] - covdata.set_query_contexts(['test']) - assert covdata.lines('a.py') == [1, 2] - covdata.set_query_contexts(['other']) - assert covdata.lines('a.py') == [] + assert covdata.lines("a.py") == [1, 2] + covdata.set_query_contexts(["test"]) + assert covdata.lines("a.py") == [1, 2] + covdata.set_query_contexts(["other"]) + assert 
covdata.lines("a.py") == [] def test_contexts_by_lineno_with_lines(self) -> None: covdata = DebugCoverageData() - covdata.set_context('test_a') + covdata.set_context("test_a") covdata.add_lines(LINES_1) - expected = {1: ['test_a'], 2: ['test_a']} - assert covdata.contexts_by_lineno('a.py') == expected + expected = {1: ["test_a"], 2: ["test_a"]} + assert covdata.contexts_by_lineno("a.py") == expected @pytest.mark.parametrize("lines", [LINES_1, dicts_from_sets(LINES_1)]) def test_no_duplicate_lines(self, lines: Mapping[str, Collection[TLineNo]]) -> None: @@ -265,7 +270,7 @@ def test_no_duplicate_lines(self, lines: Mapping[str, Collection[TLineNo]]) -> N covdata.add_lines(lines) covdata.set_context("context2") covdata.add_lines(lines) - assert covdata.lines('a.py') == A_PY_LINES_1 + assert covdata.lines("a.py") == A_PY_LINES_1 @pytest.mark.parametrize("arcs", [ARCS_3, dicts_from_sets(ARCS_3)]) def test_no_duplicate_arcs(self, arcs: Mapping[str, Collection[TArc]]) -> None: @@ -274,39 +279,39 @@ def test_no_duplicate_arcs(self, arcs: Mapping[str, Collection[TArc]]) -> None: covdata.add_arcs(arcs) covdata.set_context("context2") covdata.add_arcs(arcs) - assert covdata.arcs('x.py') == X_PY_ARCS_3 + assert covdata.arcs("x.py") == X_PY_ARCS_3 def test_no_arcs_vs_unmeasured_file(self) -> None: covdata = DebugCoverageData() covdata.add_arcs(ARCS_3) - covdata.touch_file('zzz.py') - assert covdata.lines('zzz.py') == [] - assert covdata.lines('no_such_file.py') is None - assert covdata.arcs('zzz.py') == [] - assert covdata.arcs('no_such_file.py') is None + covdata.touch_file("zzz.py") + assert covdata.lines("zzz.py") == [] + assert covdata.lines("no_such_file.py") is None + assert covdata.arcs("zzz.py") == [] + assert covdata.arcs("no_such_file.py") is None def test_arcs_with_contexts(self) -> None: covdata = DebugCoverageData() - covdata.set_context('test_x') + covdata.set_context("test_x") covdata.add_arcs(ARCS_3) - assert covdata.arcs('x.py') == [(-1, 1), (1, 2), (2, 3), (3, -1)] - covdata.set_query_contexts(['test_.$']) - assert covdata.arcs('x.py') == [(-1, 1), (1, 2), (2, 3), (3, -1)] - covdata.set_query_contexts(['other']) - assert covdata.arcs('x.py') == [] + assert covdata.arcs("x.py") == [(-1, 1), (1, 2), (2, 3), (3, -1)] + covdata.set_query_contexts(["test_.$"]) + assert covdata.arcs("x.py") == [(-1, 1), (1, 2), (2, 3), (3, -1)] + covdata.set_query_contexts(["other"]) + assert covdata.arcs("x.py") == [] def test_contexts_by_lineno_with_arcs(self) -> None: covdata = DebugCoverageData() - covdata.set_context('test_x') + covdata.set_context("test_x") covdata.add_arcs(ARCS_3) - expected = {1: ['test_x'], 2: ['test_x'], 3: ['test_x']} - assert covdata.contexts_by_lineno('x.py') == expected + expected = {1: ["test_x"], 2: ["test_x"], 3: ["test_x"]} + assert covdata.contexts_by_lineno("x.py") == expected def test_contexts_by_lineno_with_unknown_file(self) -> None: covdata = DebugCoverageData() - covdata.set_context('test_x') + covdata.set_context("test_x") covdata.add_arcs(ARCS_3) - assert covdata.contexts_by_lineno('xyz.py') == {} + assert covdata.contexts_by_lineno("xyz.py") == {} def test_context_by_lineno_with_query_contexts_with_lines(self) -> None: covdata = DebugCoverageData() @@ -315,7 +320,7 @@ def test_context_by_lineno_with_query_contexts_with_lines(self) -> None: covdata.set_context("test_2") covdata.add_lines(LINES_2) covdata.set_query_context("test_1") - assert covdata.contexts_by_lineno("a.py") == dict.fromkeys([1,2], ["test_1"]) + assert covdata.contexts_by_lineno("a.py") == 
dict.fromkeys([1, 2], ["test_1"]) def test_context_by_lineno_with_query_contexts_with_arcs(self) -> None: covdata = DebugCoverageData() @@ -324,15 +329,17 @@ def test_context_by_lineno_with_query_contexts_with_arcs(self) -> None: covdata.set_context("test_2") covdata.add_arcs(ARCS_4) covdata.set_query_context("test_1") - assert covdata.contexts_by_lineno("x.py") == dict.fromkeys([1,2,3], ["test_1"]) + assert covdata.contexts_by_lineno("x.py") == dict.fromkeys([1, 2, 3], ["test_1"]) def test_file_tracer_name(self) -> None: covdata = DebugCoverageData() - covdata.add_lines({ - "p1.foo": [1, 2, 3], - "p2.html": [10, 11, 12], - "main.py": [20], - }) + covdata.add_lines( + { + "p1.foo": [1, 2, 3], + "p2.html": [10, 11, 12], + "main.py": [20], + } + ) covdata.add_file_tracers({"p1.foo": "p1.plugin", "p2.html": "p2.plugin"}) assert covdata.file_tracer("p1.foo") == "p1.plugin" assert covdata.file_tracer("p2.html") == "p2.plugin" @@ -341,21 +348,25 @@ def test_file_tracer_name(self) -> None: def test_ok_to_repeat_file_tracer(self) -> None: covdata = DebugCoverageData() - covdata.add_lines({ - "p1.foo": [1, 2, 3], - "p2.html": [10, 11, 12], - }) + covdata.add_lines( + { + "p1.foo": [1, 2, 3], + "p2.html": [10, 11, 12], + } + ) covdata.add_file_tracers({"p1.foo": "p1.plugin", "p2.html": "p2.plugin"}) covdata.add_file_tracers({"p1.foo": "p1.plugin"}) assert covdata.file_tracer("p1.foo") == "p1.plugin" def test_ok_to_set_empty_file_tracer(self) -> None: covdata = DebugCoverageData() - covdata.add_lines({ - "p1.foo": [1, 2, 3], - "p2.html": [10, 11, 12], - "main.py": [20], - }) + covdata.add_lines( + { + "p1.foo": [1, 2, 3], + "p2.html": [10, 11, 12], + "main.py": [20], + } + ) covdata.add_file_tracers({"p1.foo": "p1.plugin", "main.py": ""}) assert covdata.file_tracer("p1.foo") == "p1.plugin" assert covdata.file_tracer("main.py") == "" @@ -370,13 +381,13 @@ def test_cant_change_file_tracer_name(self) -> None: covdata.add_file_tracers({"p1.foo": "p1.plugin.foo"}) def test_update_lines(self) -> None: - covdata1 = DebugCoverageData(suffix='1') + covdata1 = DebugCoverageData(suffix="1") covdata1.add_lines(LINES_1) - covdata2 = DebugCoverageData(suffix='2') + covdata2 = DebugCoverageData(suffix="2") covdata2.add_lines(LINES_2) - covdata3 = DebugCoverageData(suffix='3') + covdata3 = DebugCoverageData(suffix="3") covdata3.update(covdata1) covdata3.update(covdata2) @@ -384,13 +395,13 @@ def test_update_lines(self) -> None: assert_measured_files(covdata3, MEASURED_FILES_1_2) def test_update_arcs(self) -> None: - covdata1 = DebugCoverageData(suffix='1') + covdata1 = DebugCoverageData(suffix="1") covdata1.add_arcs(ARCS_3) - covdata2 = DebugCoverageData(suffix='2') + covdata2 = DebugCoverageData(suffix="2") covdata2.add_arcs(ARCS_4) - covdata3 = DebugCoverageData(suffix='3') + covdata3 = DebugCoverageData(suffix="3") covdata3.update(covdata1) covdata3.update(covdata2) @@ -398,10 +409,10 @@ def test_update_arcs(self) -> None: assert_measured_files(covdata3, MEASURED_FILES_3_4) def test_update_cant_mix_lines_and_arcs(self) -> None: - covdata1 = DebugCoverageData(suffix='1') + covdata1 = DebugCoverageData(suffix="1") covdata1.add_lines(LINES_1) - covdata2 = DebugCoverageData(suffix='2') + covdata2 = DebugCoverageData(suffix="2") covdata2.add_arcs(ARCS_3) msg = "Can't combine branch coverage data with statement data" @@ -413,31 +424,39 @@ def test_update_cant_mix_lines_and_arcs(self) -> None: covdata2.update(covdata1) def test_update_file_tracers(self) -> None: - covdata1 = DebugCoverageData(suffix='1') - 
covdata1.add_lines({ - "p1.html": [1, 2, 3, 4], - "p2.html": [5, 6, 7], - "main.py": [10, 11, 12], - }) - covdata1.add_file_tracers({ - "p1.html": "html.plugin", - "p2.html": "html.plugin2", - }) - - covdata2 = DebugCoverageData(suffix='2') - covdata2.add_lines({ - "p1.html": [3, 4, 5, 6], - "p2.html": [7, 8, 9], - "p3.foo": [1000, 1001], - "main.py": [10, 11, 12], - }) - covdata2.add_file_tracers({ - "p1.html": "html.plugin", - "p2.html": "html.plugin2", - "p3.foo": "foo_plugin", - }) - - covdata3 = DebugCoverageData(suffix='3') + covdata1 = DebugCoverageData(suffix="1") + covdata1.add_lines( + { + "p1.html": [1, 2, 3, 4], + "p2.html": [5, 6, 7], + "main.py": [10, 11, 12], + } + ) + covdata1.add_file_tracers( + { + "p1.html": "html.plugin", + "p2.html": "html.plugin2", + } + ) + + covdata2 = DebugCoverageData(suffix="2") + covdata2.add_lines( + { + "p1.html": [3, 4, 5, 6], + "p2.html": [7, 8, 9], + "p3.foo": [1000, 1001], + "main.py": [10, 11, 12], + } + ) + covdata2.add_file_tracers( + { + "p1.html": "html.plugin", + "p2.html": "html.plugin2", + "p3.foo": "foo_plugin", + } + ) + + covdata3 = DebugCoverageData(suffix="3") covdata3.update(covdata1) covdata3.update(covdata2) assert covdata3.file_tracer("p1.html") == "html.plugin" @@ -446,11 +465,11 @@ def test_update_file_tracers(self) -> None: assert covdata3.file_tracer("main.py") == "" def test_update_conflicting_file_tracers(self) -> None: - covdata1 = DebugCoverageData(suffix='1') + covdata1 = DebugCoverageData(suffix="1") covdata1.add_lines({"p1.html": [1, 2, 3]}) covdata1.add_file_tracers({"p1.html": "html.plugin"}) - covdata2 = DebugCoverageData(suffix='2') + covdata2 = DebugCoverageData(suffix="2") covdata2.add_lines({"p1.html": [1, 2, 3]}) covdata2.add_file_tracers({"p1.html": "html.other_plugin"}) @@ -479,18 +498,18 @@ def test_update_file_tracer_vs_no_file_tracer(self) -> None: covdata2.update(covdata1) def test_update_lines_empty(self) -> None: - covdata1 = DebugCoverageData(suffix='1') + covdata1 = DebugCoverageData(suffix="1") covdata1.add_lines(LINES_1) - covdata2 = DebugCoverageData(suffix='2') + covdata2 = DebugCoverageData(suffix="2") covdata1.update(covdata2) assert_line_counts(covdata1, SUMMARY_1) def test_update_arcs_empty(self) -> None: - covdata1 = DebugCoverageData(suffix='1') + covdata1 = DebugCoverageData(suffix="1") covdata1.add_arcs(ARCS_3) - covdata2 = DebugCoverageData(suffix='2') + covdata2 = DebugCoverageData(suffix="2") covdata1.update(covdata2) assert_line_counts(covdata1, SUMMARY_3) @@ -507,8 +526,8 @@ def test_add_to_hash_with_lines(self) -> None: hasher = mock.Mock() add_data_to_hash(covdata, "a.py", hasher) assert hasher.method_calls == [ - mock.call.update([1, 2]), # lines - mock.call.update(""), # file_tracer name + mock.call.update([1, 2]), # lines + mock.call.update(""), # file_tracer name ] def test_add_to_hash_with_arcs(self) -> None: @@ -518,8 +537,8 @@ def test_add_to_hash_with_arcs(self) -> None: hasher = mock.Mock() add_data_to_hash(covdata, "y.py", hasher) assert hasher.method_calls == [ - mock.call.update([(-1, 17), (17, 23), (23, -1)]), # arcs - mock.call.update("hologram_plugin"), # file_tracer name + mock.call.update([(-1, 17), (17, 23), (23, -1)]), # arcs + mock.call.update("hologram_plugin"), # file_tracer name ] def test_add_to_lines_hash_with_missing_file(self) -> None: @@ -580,7 +599,7 @@ def thread_main() -> None: """Every thread will try to add the same data.""" try: covdata.add_lines(LINES_1) - except Exception as ex: # pragma: only failure + except Exception as ex: # pragma: 
only failure exceptions.append(ex) threads = [threading.Thread(target=thread_main) for _ in range(10)] @@ -668,7 +687,7 @@ def test_error_when_closing(self, klass: TCoverageData) -> None: covdata.add_lines(LINES_1) # I don't know how to make a real error, so let's fake one. sqldb = list(covdata._dbs.values())[0] - sqldb.close = lambda: 1/0 # type: ignore + sqldb.close = lambda: 1 / 0 # type: ignore covdata.add_lines(LINES_1) def test_wrong_schema_version(self) -> None: @@ -725,14 +744,14 @@ def test_debug_output_with_debug_option(self) -> None: print(debug.get_output()) assert re.search( - r"^" + - r"Closing dbs, force=False: {}\n" + - r"Erasing data file '.*\.coverage' \(does not exist\)\n" + - r"Opening data file '.*\.coverage' \(does not exist\)\n" + - r"Initing data file '.*\.coverage' \(0 bytes, modified [-:. 0-9]+\)\n" + - r"Writing \(no-op\) data file '.*\.coverage' \(\d+ bytes, modified [-:. 0-9]+\)\n" + - r"Opening data file '.*\.coverage' \(\d+ bytes, modified [-:. 0-9]+\)\n" + - r"$", + r"^" + + r"Closing dbs, force=False: {}\n" + + r"Erasing data file '.*\.coverage' \(does not exist\)\n" + + r"Opening data file '.*\.coverage' \(does not exist\)\n" + + r"Initing data file '.*\.coverage' \(0 bytes, modified [-:. 0-9]+\)\n" + + r"Writing \(no-op\) data file '.*\.coverage' \(\d+ bytes, modified [-:. 0-9]+\)\n" + + r"Opening data file '.*\.coverage' \(\d+ bytes, modified [-:. 0-9]+\)\n" + + r"$", debug.get_output(), ) @@ -752,7 +771,7 @@ def test_debug_output_without_debug_option(self) -> None: def test_explicit_suffix(self) -> None: self.assert_doesnt_exist(".coverage.SUFFIX") - covdata = DebugCoverageData(suffix='SUFFIX') + covdata = DebugCoverageData(suffix="SUFFIX") covdata.add_lines(LINES_1) covdata.write() self.assert_exists(".coverage.SUFFIX") @@ -783,13 +802,13 @@ def test_true_suffix(self) -> None: def test_combining(self) -> None: self.assert_file_count(".coverage.*", 0) - covdata1 = DebugCoverageData(suffix='1') + covdata1 = DebugCoverageData(suffix="1") covdata1.add_lines(LINES_1) covdata1.write() self.assert_exists(".coverage.1") self.assert_file_count(".coverage.*", 1) - covdata2 = DebugCoverageData(suffix='2') + covdata2 = DebugCoverageData(suffix="2") covdata2.add_lines(LINES_2) covdata2.write() self.assert_exists(".coverage.2") @@ -823,22 +842,28 @@ def test_erasing_parallel(self) -> None: self.assert_exists(".coverage") def test_combining_with_aliases(self) -> None: - covdata1 = DebugCoverageData(suffix='1') - covdata1.add_lines({ - '/home/ned/proj/src/a.py': {1, 2}, - '/home/ned/proj/src/sub/b.py': {3}, - '/home/ned/proj/src/template.html': {10}, - }) - covdata1.add_file_tracers({ - '/home/ned/proj/src/template.html': 'html.plugin', - }) + covdata1 = DebugCoverageData(suffix="1") + covdata1.add_lines( + { + "/home/ned/proj/src/a.py": {1, 2}, + "/home/ned/proj/src/sub/b.py": {3}, + "/home/ned/proj/src/template.html": {10}, + } + ) + covdata1.add_file_tracers( + { + "/home/ned/proj/src/template.html": "html.plugin", + } + ) covdata1.write() - covdata2 = DebugCoverageData(suffix='2') - covdata2.add_lines({ - r'c:\ned\test\a.py': {4, 5}, - r'c:\ned\test\sub\b.py': {3, 6}, - }) + covdata2 = DebugCoverageData(suffix="2") + covdata2.add_lines( + { + r"c:\ned\test\a.py": {4, 5}, + r"c:\ned\test\sub\b.py": {3, 6}, + } + ) covdata2.write() self.assert_file_count(".coverage.*", 2) @@ -854,32 +879,32 @@ def test_combining_with_aliases(self) -> None: self.assert_file_count(".coverage.*", 0) self.assert_exists(".coverage") - apy = canonical_filename('./a.py') - sub_bpy = 
canonical_filename('./sub/b.py') - template_html = canonical_filename('./template.html') + apy = canonical_filename("./a.py") + sub_bpy = canonical_filename("./sub/b.py") + template_html = canonical_filename("./template.html") assert_line_counts(covdata3, {apy: 4, sub_bpy: 2, template_html: 1}, fullpath=True) assert_measured_files(covdata3, [apy, sub_bpy, template_html]) - assert covdata3.file_tracer(template_html) == 'html.plugin' + assert covdata3.file_tracer(template_html) == "html.plugin" def test_combining_from_different_directories(self) -> None: - os.makedirs('cov1') - covdata1 = DebugCoverageData('cov1/.coverage.1') + os.makedirs("cov1") + covdata1 = DebugCoverageData("cov1/.coverage.1") covdata1.add_lines(LINES_1) covdata1.write() - os.makedirs('cov2') - covdata2 = DebugCoverageData('cov2/.coverage.2') + os.makedirs("cov2") + covdata2 = DebugCoverageData("cov2/.coverage.2") covdata2.add_lines(LINES_2) covdata2.write() # This data won't be included. - covdata_xxx = DebugCoverageData('.coverage.xxx') + covdata_xxx = DebugCoverageData(".coverage.xxx") covdata_xxx.add_arcs(ARCS_3) covdata_xxx.write() covdata3 = DebugCoverageData() - combine_parallel_data(covdata3, data_paths=['cov1', 'cov2']) + combine_parallel_data(covdata3, data_paths=["cov1", "cov2"]) assert_line_counts(covdata3, SUMMARY_1_2) assert_measured_files(covdata3, MEASURED_FILES_1_2) @@ -888,30 +913,30 @@ def test_combining_from_different_directories(self) -> None: self.assert_exists(".coverage.xxx") def test_combining_from_files(self) -> None: - os.makedirs('cov1') - covdata1 = DebugCoverageData('cov1/.coverage.1') + os.makedirs("cov1") + covdata1 = DebugCoverageData("cov1/.coverage.1") covdata1.add_lines(LINES_1) covdata1.write() # Journal files should never be included in the combining. self.make_file("cov1/.coverage.1-journal", "xyzzy") - os.makedirs('cov2') - covdata2 = DebugCoverageData('cov2/.coverage.2') + os.makedirs("cov2") + covdata2 = DebugCoverageData("cov2/.coverage.2") covdata2.add_lines(LINES_2) covdata2.write() # This data won't be included. - covdata_xxx = DebugCoverageData('.coverage.xxx') + covdata_xxx = DebugCoverageData(".coverage.xxx") covdata_xxx.add_arcs(ARCS_3) covdata_xxx.write() - covdata_2xxx = DebugCoverageData('cov2/.coverage.xxx') + covdata_2xxx = DebugCoverageData("cov2/.coverage.xxx") covdata_2xxx.add_arcs(ARCS_3) covdata_2xxx.write() covdata3 = DebugCoverageData() - combine_parallel_data(covdata3, data_paths=['cov1', 'cov2/.coverage.2']) + combine_parallel_data(covdata3, data_paths=["cov1", "cov2/.coverage.2"]) assert_line_counts(covdata3, SUMMARY_1_2) assert_measured_files(covdata3, MEASURED_FILES_1_2) @@ -924,7 +949,7 @@ def test_combining_from_nonexistent_directories(self) -> None: covdata = DebugCoverageData() msg = "Couldn't combine from non-existent path 'xyzzy'" with pytest.raises(NoDataError, match=msg): - combine_parallel_data(covdata, data_paths=['xyzzy']) + combine_parallel_data(covdata, data_paths=["xyzzy"]) def test_interleaved_erasing_bug716(self) -> None: # pytest-cov could produce this scenario. 
#716 @@ -1005,7 +1030,7 @@ def test_serialization(self, klass: TCoverageData) -> None: def test_misfed_serialization(self) -> None: covdata = CoverageData(no_disk=True) - bad_data = b'Hello, world!\x07 ' + b'z' * 100 + bad_data = b"Hello, world!\x07 " + b"z" * 100 msg = r"Unrecognized serialization: {} \(head of {} bytes\)".format( re.escape(repr(bad_data[:40])), len(bad_data), @@ -1022,9 +1047,9 @@ class NoDiskTest(CoverageTest): def test_updating(self) -> None: # https://github.com/nedbat/coveragepy/issues/1323 a = CoverageData(no_disk=True) - a.add_lines({'foo.py': [10, 20, 30]}) - assert a.measured_files() == {'foo.py'} + a.add_lines({"foo.py": [10, 20, 30]}) + assert a.measured_files() == {"foo.py"} b = CoverageData(no_disk=True) b.update(a) - assert b.measured_files() == {'foo.py'} + assert b.measured_files() == {"foo.py"} diff --git a/tests/test_debug.py b/tests/test_debug.py index fc6f87091..19f2a5828 100644 --- a/tests/test_debug.py +++ b/tests/test_debug.py @@ -19,10 +19,18 @@ import coverage from coverage import env from coverage.debug import ( - DebugControl, DebugOutputFile, - auto_repr, clipped_repr, exc_one_line, filter_text, - info_formatter, info_header, - relevant_environment_display, short_id, short_filename, short_stack, + DebugControl, + DebugOutputFile, + auto_repr, + clipped_repr, + exc_one_line, + filter_text, + info_formatter, + info_header, + relevant_environment_display, + short_id, + short_filename, + short_stack, ) from coverage.exceptions import DataError @@ -37,70 +45,90 @@ class InfoFormatterTest(CoverageTest): run_in_temp_dir = False def test_info_formatter(self) -> None: - lines = list(info_formatter([ - ('x', 'hello there'), - ('very long label', ['one element']), - ('regular', ['abc', 'def', 'ghi', 'jkl']), - ('nothing', []), - ])) + lines = list( + info_formatter( + [ + ("x", "hello there"), + ("very long label", ["one element"]), + ("regular", ["abc", "def", "ghi", "jkl"]), + ("nothing", []), + ] + ) + ) expected = [ - ' x: hello there', - ' very long label: one element', - ' regular: abc', - ' def', - ' ghi', - ' jkl', - ' nothing: -none-', + " x: hello there", + " very long label: one element", + " regular: abc", + " def", + " ghi", + " jkl", + " nothing: -none-", ] assert expected == lines def test_info_formatter_with_generator(self) -> None: - lines = list(info_formatter(('info%d' % i, i) for i in range(3))) + lines = list(info_formatter(("info%d" % i, i) for i in range(3))) expected = [ - ' info0: 0', - ' info1: 1', - ' info2: 2', + " info0: 0", + " info1: 1", + " info2: 2", ] assert expected == lines def test_too_long_label(self) -> None: with pytest.raises(AssertionError): - list(info_formatter([('this label is way too long and will not fit', 23)])) + list(info_formatter([("this label is way too long and will not fit", 23)])) -@pytest.mark.parametrize("label, header", [ - ("x", "-- x ---------------------------------------------------------"), - ("hello there", "-- hello there -----------------------------------------------"), -]) +@pytest.mark.parametrize( + "label, header", + [ + ("x", "-- x ---------------------------------------------------------"), + ("hello there", "-- hello there -----------------------------------------------"), + ], +) def test_info_header(label: str, header: str) -> None: assert header == info_header(label) -@pytest.mark.parametrize("id64, id16", [ - (0x1234, 0x1234), - (0x12340000, 0x1234), - (0xA5A55A5A, 0xFFFF), - (0x1234cba956780fed, 0x8008), -]) +@pytest.mark.parametrize( + "id64, id16", + [ + (0x1234, 0x1234), + 
(0x12340000, 0x1234), + (0xA5A55A5A, 0xFFFF), + (0x1234CBA956780FED, 0x8008), + ], +) def test_short_id(id64: int, id16: int) -> None: assert id16 == short_id(id64) -@pytest.mark.parametrize("text, numchars, result", [ - ("hello", 10, "'hello'"), - ("0123456789abcdefghijklmnopqrstuvwxyz", 15, "'01234...vwxyz'"), -]) +@pytest.mark.parametrize( + "text, numchars, result", + [ + ("hello", 10, "'hello'"), + ("0123456789abcdefghijklmnopqrstuvwxyz", 15, "'01234...vwxyz'"), + ], +) def test_clipped_repr(text: str, numchars: int, result: str) -> None: assert result == clipped_repr(text, numchars) -@pytest.mark.parametrize("text, filters, result", [ - ("hello", [], "hello"), - ("hello\n", [], "hello\n"), - ("hello\nhello\n", [], "hello\nhello\n"), - ("hello\nbye\n", [lambda x: "="+x], "=hello\n=bye\n"), - ("hello\nbye\n", [lambda x: "="+x, lambda x: x+"\ndone\n"], "=hello\ndone\n=bye\ndone\n"), -]) +@pytest.mark.parametrize( + "text, filters, result", + [ + ("hello", [], "hello"), + ("hello\n", [], "hello\n"), + ("hello\nhello\n", [], "hello\nhello\n"), + ("hello\nbye\n", [lambda x: "=" + x], "=hello\n=bye\n"), + ( + "hello\nbye\n", + [lambda x: "=" + x, lambda x: x + "\ndone\n"], + "=hello\ndone\n=bye\ndone\n", + ), + ], +) def test_filter_text( text: str, filters: Iterable[Callable[[str], str]], @@ -115,13 +143,16 @@ class DebugTraceTest(CoverageTest): def f1_debug_output(self, debug: Iterable[str]) -> str: """Runs some code with `debug` option, returns the debug output.""" # Make code to run. - self.make_file("f1.py", """\ + self.make_file( + "f1.py", + """\ def f1(x): return x+1 for i in range(5): f1(i) - """) + """, + ) debug_out = io.StringIO() cov = coverage.Coverage(debug=debug) @@ -187,7 +218,7 @@ def test_debug_config(self) -> None: report_include report_omit """.split() for label in labels: - label_pat = fr"^\s*{label}: " + label_pat = rf"^\s*{label}: " msg = f"Incorrect lines for {label!r}" assert 1 == len(re_lines(label_pat, out_text)), msg @@ -230,7 +261,7 @@ def assert_good_debug_sys(out_text: str) -> None: pid cwd path environment command_line cover_match pylib_match """.split() for label in labels: - label_pat = fr"^\s*{label}: " + label_pat = rf"^\s*{label}: " msg = f"Incorrect lines for {label!r}" assert 1 == len(re_lines(label_pat, out_text)), msg tracer_line = re_line(" core:", out_text).strip() @@ -301,7 +332,7 @@ def test_debug_control(self) -> None: def test_debug_write_exceptions(self) -> None: debug = DebugControlString(["yes"]) try: - raise RuntimeError('Oops') # This is in the traceback + raise RuntimeError("Oops") # This is in the traceback except Exception as exc: debug.write("Something happened", exc=exc) lines = debug.get_output().splitlines() @@ -313,6 +344,7 @@ def test_debug_write_exceptions(self) -> None: def test_debug_write_self(self) -> None: class DebugWritingClass: """A simple class to show 'self:' debug messages.""" + def __init__(self, debug: DebugControl) -> None: # This line will have "self:" reported. 
debug.write("Hello from me") @@ -339,10 +371,12 @@ def f_one(*args: Any, **kwargs: Any) -> str: """First of the chain of functions for testing `short_stack`.""" return f_two(*args, **kwargs) + def f_two(*args: Any, **kwargs: Any) -> str: """Second of the chain of functions for testing `short_stack`.""" return f_three(*args, **kwargs) + def f_three(*args: Any, **kwargs: Any) -> str: """Third of the chain of functions for testing `short_stack`.""" return short_stack(*args, **kwargs) @@ -377,9 +411,9 @@ def test_short_stack_full(self) -> None: py = "pypy" if env.PYPY else "python" majv, minv = sys.version_info[:2] pylib = f"lib{s}{py}{majv}.{minv}{sys.abiflags}" - assert len(re_lines(fr"{s}{pylib}{s}site-packages{s}_pytest", stack_text)) > 3 - assert len(re_lines(fr"{s}{pylib}{s}site-packages{s}pluggy", stack_text)) > 3 - assert not re_lines(r" 0x[0-9a-fA-F]+", stack_text) # No frame ids + assert len(re_lines(rf"{s}{pylib}{s}site-packages{s}_pytest", stack_text)) > 3 + assert len(re_lines(rf"{s}{pylib}{s}site-packages{s}pluggy", stack_text)) > 3 + assert not re_lines(r" 0x[0-9a-fA-F]+", stack_text) # No frame ids stack = stack_text.splitlines() assert len(stack) > 25 assert "test_short_stack" in stack[-4] @@ -391,8 +425,8 @@ def test_short_stack_short_filenames(self) -> None: stack_text = f_one(full=True, short_filenames=True) s = re.escape(os.sep) assert not re_lines(r"site-packages", stack_text) - assert len(re_lines(fr"syspath:{s}_pytest", stack_text)) > 3 - assert len(re_lines(fr"syspath:{s}pluggy", stack_text)) > 3 + assert len(re_lines(rf"syspath:{s}_pytest", stack_text)) > 3 + assert len(re_lines(rf"syspath:{s}pluggy", stack_text)) > 3 def test_short_stack_frame_ids(self) -> None: stack = f_one(full=True, frame_ids=True).splitlines() @@ -415,7 +449,7 @@ def test_short_filename(self) -> None: assert short_filename(env.__file__) == f"cov:{s}env.py" self.make_file("hello.txt", "hi") short_hello = short_filename(os.path.abspath("hello.txt")) - assert re.match(fr"tmp:{se}t\d+{se}hello.txt", short_hello) + assert re.match(rf"tmp:{se}t\d+{se}hello.txt", short_hello) oddball = f"{s}xyzzy{s}plugh{s}foo.txt" assert short_filename(oddball) == oddball assert short_filename(None) is None @@ -458,10 +492,13 @@ def test_exc_one_line() -> None: def test_auto_repr() -> None: class MyStuff: """Random class to test auto_repr.""" + def __init__(self) -> None: self.x = 17 self.y = "hello" + __repr__ = auto_repr + stuff = MyStuff() setattr(stuff, "$coverage.object_id", 123456) assert re.match(r"", repr(stuff)) diff --git a/tests/test_execfile.py b/tests/test_execfile.py index ef73d3a22..ab39b70f4 100644 --- a/tests/test_execfile.py +++ b/tests/test_execfile.py @@ -42,32 +42,35 @@ def test_run_python_file(self) -> None: mod_globs = json.loads(self.stdout()) # The file should think it is __main__ - assert mod_globs['__name__'] == "__main__" + assert mod_globs["__name__"] == "__main__" # It should seem to come from a file named try_execfile.py - dunder_file = os.path.basename(mod_globs['__file__']) + dunder_file = os.path.basename(mod_globs["__file__"]) assert dunder_file == "try_execfile.py" # It should have its correct module data. - assert mod_globs['__doc__'].splitlines()[0] == "Test file for run_python_file." - assert mod_globs['DATA'] == "xyzzy" - assert mod_globs['FN_VAL'] == "my_fn('fooey')" + assert mod_globs["__doc__"].splitlines()[0] == "Test file for run_python_file." + assert mod_globs["DATA"] == "xyzzy" + assert mod_globs["FN_VAL"] == "my_fn('fooey')" # It must be self-importable as __main__. 
- assert mod_globs['__main__.DATA'] == "xyzzy" + assert mod_globs["__main__.DATA"] == "xyzzy" # Argv should have the proper values. - assert mod_globs['argv0'] == TRY_EXECFILE - assert mod_globs['argv1-n'] == ["arg1", "arg2"] + assert mod_globs["argv0"] == TRY_EXECFILE + assert mod_globs["argv1-n"] == ["arg1", "arg2"] # __builtins__ should have the right values, like open(). - assert mod_globs['__builtins__.has_open'] is True + assert mod_globs["__builtins__.has_open"] is True def test_no_extra_file(self) -> None: # Make sure that running a file doesn't create an extra compiled file. - self.make_file("xxx", """\ + self.make_file( + "xxx", + """\ desc = "a non-.py file!" - """) + """, + ) assert os.listdir(".") == ["xxx"] run_python_file(["xxx"]) @@ -75,36 +78,42 @@ def test_no_extra_file(self) -> None: def test_universal_newlines(self) -> None: # Make sure we can read any sort of line ending. - pylines = """# try newlines|print('Hello, world!')|""".split('|') + pylines = """# try newlines|print('Hello, world!')|""".split("|") for nl in ["\n", "\r\n", "\r"]: with open("nl.py", "wb") as fpy: fpy.write(nl.join(pylines).encode("utf-8")) run_python_file(["nl.py"]) - assert self.stdout() == "Hello, world!\n"*3 + assert self.stdout() == "Hello, world!\n" * 3 def test_missing_final_newline(self) -> None: # Make sure we can deal with a Python file with no final newline. - self.make_file("abrupt.py", """\ + self.make_file( + "abrupt.py", + """\ if 1: a = 1 print(f"a is {a!r}") - #""") + #""", + ) with open("abrupt.py", encoding="utf-8") as f: abrupt = f.read() - assert abrupt[-1] == '#' + assert abrupt[-1] == "#" run_python_file(["abrupt.py"]) assert self.stdout() == "a is 1\n" def test_no_such_file(self) -> None: - path = python_reported_file('xyzzy.py') + path = python_reported_file("xyzzy.py") msg = re.escape(f"No file to run: '{path}'") with pytest.raises(NoSource, match=msg): run_python_file(["xyzzy.py"]) def test_directory_with_main(self) -> None: - self.make_file("with_main/__main__.py", """\ + self.make_file( + "with_main/__main__.py", + """\ print("I am __main__") - """) + """, + ) run_python_file(["with_main"]) assert self.stdout() == "I am __main__\n" @@ -114,7 +123,9 @@ def test_directory_without_main(self) -> None: run_python_file(["without_main"]) def test_code_throws(self) -> None: - self.make_file("throw.py", """\ + self.make_file( + "throw.py", + """\ class MyException(Exception): pass @@ -126,7 +137,8 @@ def f2(): f1() f2() - """) + """, + ) with pytest.raises(SystemExit) as exc_info: run_python_file(["throw.py"]) @@ -135,7 +147,9 @@ def f2(): assert self.stderr() == "" def test_code_exits(self) -> None: - self.make_file("exit.py", """\ + self.make_file( + "exit.py", + """\ import sys def f1(): print("about to exit..") @@ -145,7 +159,8 @@ def f2(): f1() f2() - """) + """, + ) with pytest.raises(SystemExit) as exc_info: run_python_file(["exit.py"]) @@ -154,7 +169,9 @@ def f2(): assert self.stderr() == "" def test_excepthook_exit(self) -> None: - self.make_file("excepthook_exit.py", """\ + self.make_file( + "excepthook_exit.py", + """\ import sys def excepthook(*args): @@ -164,14 +181,17 @@ def excepthook(*args): sys.excepthook = excepthook raise RuntimeError('Error Outside') - """) + """, + ) with pytest.raises(SystemExit): run_python_file(["excepthook_exit.py"]) cov_out = self.stdout() assert cov_out == "in excepthook\n" def test_excepthook_throw(self) -> None: - self.make_file("excepthook_throw.py", """\ + self.make_file( + "excepthook_throw.py", + """\ import sys def 
excepthook(*args): @@ -184,7 +204,8 @@ def excepthook(*args): sys.excepthook = excepthook raise RuntimeError('Error Outside') - """) + """, + ) with pytest.raises(_ExceptionDuringRun) as exc_info: run_python_file(["excepthook_throw.py"]) # The _ExceptionDuringRun exception has the RuntimeError as its argument. @@ -200,12 +221,15 @@ class RunPycFileTest(CoverageTest): def make_pyc(self, **kwargs: Any) -> str: """Create a .pyc file, and return the path to it.""" - self.make_file("compiled.py", """\ + self.make_file( + "compiled.py", + """\ def doit(): print("I am here!") doit() - """) + """, + ) compileall.compile_dir(".", quiet=True, **kwargs) os.remove("compiled.py") @@ -231,7 +255,7 @@ def test_running_pyc_from_wrong_python(self) -> None: # Jam Python 2.1 magic number into the .pyc file. with open(pycfile, "r+b") as fpyc: fpyc.seek(0) - fpyc.write(bytes([0x2a, 0xeb, 0x0d, 0x0a])) + fpyc.write(bytes([0x2A, 0xEB, 0x0D, 0x0A])) with pytest.raises(NoCode, match="Bad magic number in .pyc file"): run_python_file([pycfile]) @@ -245,7 +269,7 @@ def test_running_hashed_pyc(self) -> None: assert self.stdout() == "I am here!\n" def test_no_such_pyc_file(self) -> None: - path = python_reported_file('xyzzy.pyc') + path = python_reported_file("xyzzy.pyc") msg = re.escape(f"No file to run: '{path}'") with pytest.raises(NoCode, match=msg): run_python_file(["xyzzy.pyc"]) @@ -255,12 +279,12 @@ def test_running_py_from_binary(self) -> None: # be able to write binary files. bf = self.make_file("binary") with open(bf, "wb") as f: - f.write(b'\x7fELF\x02\x01\x01\x00\x00\x00') + f.write(b"\x7fELF\x02\x01\x01\x00\x00\x00") - path = python_reported_file('binary') + path = python_reported_file("binary") msg = ( - re.escape(f"Couldn't run '{path}' as Python code: ") + - r"(ValueError|SyntaxError): source code string cannot contain null bytes" + re.escape(f"Couldn't run '{path}' as Python code: ") + + r"(ValueError|SyntaxError): source code string cannot contain null bytes" ) with pytest.raises(Exception, match=msg): run_python_file([bf]) diff --git a/tests/test_filereporter.py b/tests/test_filereporter.py index c36fa013b..fccbd9f69 100644 --- a/tests/test_filereporter.py +++ b/tests/test_filereporter.py @@ -79,7 +79,7 @@ def test_comparison(self) -> None: acu2 = FileReporter("aa/afile.py") zcu = FileReporter("aa/zfile.py") bcu = FileReporter("aa/bb/bfile.py") - assert acu == acu2 and acu <= acu2 and acu >= acu2 # pylint: disable=chained-comparison + assert acu == acu2 and acu <= acu2 and acu >= acu2 # pylint: disable=chained-comparison assert acu < zcu and acu <= zcu and acu != zcu assert zcu > acu and zcu >= acu and zcu != acu assert acu < bcu and acu <= bcu and acu != bcu diff --git a/tests/test_files.py b/tests/test_files.py index 50e69e647..b0f6daa1e 100644 --- a/tests/test_files.py +++ b/tests/test_files.py @@ -19,8 +19,15 @@ from coverage import env, files from coverage.exceptions import ConfigError from coverage.files import ( - GlobMatcher, ModuleMatcher, PathAliases, TreeMatcher, abs_file, - actual_path, find_python_files, flat_rootname, globs_to_regex, + GlobMatcher, + ModuleMatcher, + PathAliases, + TreeMatcher, + abs_file, + actual_path, + find_python_files, + flat_rootname, + globs_to_regex, ) from tests.coveragetest import CoverageTest @@ -61,7 +68,7 @@ def test_filepath_contains_absolute_prefix_twice(self) -> None: files.set_relative_directory() d = abs_file(os.curdir) trick = os.path.splitdrive(d)[1].lstrip(os.path.sep) - rel = os.path.join('sub', trick, 'file1.py') + rel = os.path.join("sub", 
trick, "file1.py") assert files.relative_filename(abs_file(rel)) == rel def test_canonical_filename_ensure_cache_hit(self) -> None: @@ -69,27 +76,29 @@ def test_canonical_filename_ensure_cache_hit(self) -> None: d = actual_path(self.abs_path("sub/proj1")) os.chdir(d) files.set_relative_directory() - canonical_path = files.canonical_filename('sub/proj1/file1.py') - assert canonical_path == self.abs_path('file1.py') + canonical_path = files.canonical_filename("sub/proj1/file1.py") + assert canonical_path == self.abs_path("file1.py") # After the filename has been converted, it should be in the cache. - assert 'sub/proj1/file1.py' in files.CANONICAL_FILENAME_CACHE - assert files.canonical_filename('sub/proj1/file1.py') == self.abs_path('file1.py') + assert "sub/proj1/file1.py" in files.CANONICAL_FILENAME_CACHE + assert files.canonical_filename("sub/proj1/file1.py") == self.abs_path("file1.py") @pytest.mark.parametrize( - "curdir, sep", [ + "curdir, sep", + [ ("/", "/"), ("X:\\", "\\"), ], ) def test_relative_dir_for_root(self, curdir: str, sep: str) -> None: - with mock.patch.object(files.os, 'curdir', new=curdir): # type: ignore[attr-defined] - with mock.patch.object(files.os, 'sep', new=sep): # type: ignore[attr-defined] - with mock.patch('coverage.files.os.path.normcase', return_value=curdir): + with mock.patch.object(files.os, "curdir", new=curdir): # type: ignore[attr-defined] + with mock.patch.object(files.os, "sep", new=sep): # type: ignore[attr-defined] + with mock.patch("coverage.files.os.path.normcase", return_value=curdir): files.set_relative_directory() assert files.relative_directory() == curdir @pytest.mark.parametrize( - "to_make, to_check, answer", [ + "to_make, to_check, answer", + [ ("a/b/c/foo.py", "a/b/c/foo.py", True), ("a/b/c/foo.py", "a/b/c/bar.py", False), ("src/files.zip", "src/files.zip/foo.py", True), @@ -107,26 +116,29 @@ def test_source_exists(self, to_make: str, to_check: str, answer: bool) -> None: assert files.source_exists(to_check) == answer -@pytest.mark.parametrize("original, flat", [ - ("abc.py", "abc_py"), - ("hellothere", "hellothere"), - ("a/b/c.py", "z_86bbcbe134d28fd2_c_py"), - ("a/b/defghi.py", "z_86bbcbe134d28fd2_defghi_py"), - ("/a/b/c.py", "z_bb25e0ada04227c6_c_py"), - ("/a/b/defghi.py", "z_bb25e0ada04227c6_defghi_py"), - (r"c:\foo\bar.html", "z_e7c107482373f299_bar_html"), - (r"d:\foo\bar.html", "z_584a05dcebc67b46_bar_html"), - ("MontrÊal/â˜ē/conf.py", "z_c840497a2c647ce0_conf_py"), - ( # original: - r"c:\lorem\ipsum\quia\dolor\sit\amet\consectetur\adipisci\velit\sed" + - r"\quia\non\numquam\eius\modi\tempora\incidunt\ut\labore\et\dolore" + - r"\magnam\aliquam\quaerat\voluptatem\ut\enim\ad\minima\veniam\quis" + - r"\nostrum\exercitationem\ullam\corporis\suscipit\laboriosam" + - r"\MontrÊal\â˜ē\my_program.py", - # flat: - "z_e597dfacb73a23d5_my_program_py", - ), -]) +@pytest.mark.parametrize( + "original, flat", + [ + ("abc.py", "abc_py"), + ("hellothere", "hellothere"), + ("a/b/c.py", "z_86bbcbe134d28fd2_c_py"), + ("a/b/defghi.py", "z_86bbcbe134d28fd2_defghi_py"), + ("/a/b/c.py", "z_bb25e0ada04227c6_c_py"), + ("/a/b/defghi.py", "z_bb25e0ada04227c6_defghi_py"), + (r"c:\foo\bar.html", "z_e7c107482373f299_bar_html"), + (r"d:\foo\bar.html", "z_584a05dcebc67b46_bar_html"), + ("MontrÊal/â˜ē/conf.py", "z_c840497a2c647ce0_conf_py"), + ( # original: + r"c:\lorem\ipsum\quia\dolor\sit\amet\consectetur\adipisci\velit\sed" + + r"\quia\non\numquam\eius\modi\tempora\incidunt\ut\labore\et\dolore" + + 
r"\magnam\aliquam\quaerat\voluptatem\ut\enim\ad\minima\veniam\quis" + + r"\nostrum\exercitationem\ullam\corporis\suscipit\laboriosam" + + r"\MontrÊal\â˜ē\my_program.py", + # flat: + "z_e597dfacb73a23d5_my_program_py", + ), + ], +) def test_flat_rootname(original: str, flat: str) -> None: assert flat_rootname(original) == flat @@ -151,120 +163,172 @@ def globs_to_regex_params( pat_id = "|".join(patterns) for text in matches: yield pytest.param( - patterns, case_insensitive, partial, text, True, + patterns, + case_insensitive, + partial, + text, + True, id=f"{pat_id}:ci{case_insensitive}:par{partial}:{text}:match", ) for text in nomatches: yield pytest.param( - patterns, case_insensitive, partial, text, False, + patterns, + case_insensitive, + partial, + text, + False, id=f"{pat_id}:ci{case_insensitive}:par{partial}:{text}:nomatch", ) + @pytest.mark.parametrize( "patterns, case_insensitive, partial, text, result", - list(itertools.chain.from_iterable([ - globs_to_regex_params( - ["abc", "xyz"], - matches=["abc", "xyz", "sub/mod/abc"], - nomatches=[ - "ABC", "xYz", "abcx", "xabc", "axyz", "xyza", "sub/mod/abcd", "sub/abc/more", - ], - ), - globs_to_regex_params( - ["abc", "xyz"], case_insensitive=True, - matches=["abc", "xyz", "Abc", "XYZ", "AbC"], - nomatches=["abcx", "xabc", "axyz", "xyza"], - ), - globs_to_regex_params( - ["a*c", "x*z"], - matches=["abc", "xyz", "xYz", "azc", "xaz", "axyzc"], - nomatches=["ABC", "abcx", "xabc", "axyz", "xyza", "a/c"], - ), - globs_to_regex_params( - ["a?c", "x?z"], - matches=["abc", "xyz", "xYz", "azc", "xaz"], - nomatches=["ABC", "abcx", "xabc", "axyz", "xyza", "a/c"], - ), - globs_to_regex_params( - ["a??d"], - matches=["abcd", "azcd", "a12d"], - nomatches=["ABCD", "abcx", "axyz", "abcde"], - ), - globs_to_regex_params( - ["abc/hi.py"], case_insensitive=True, - matches=["abc/hi.py", "ABC/hi.py", r"ABC\hi.py"], - nomatches=["abc_hi.py", "abc/hi.pyc"], - ), - globs_to_regex_params( - [r"abc\hi.py"], case_insensitive=True, - matches=[r"abc\hi.py", r"ABC\hi.py", "abc/hi.py", "ABC/hi.py"], - nomatches=["abc_hi.py", "abc/hi.pyc"], - ), - globs_to_regex_params( - ["abc/*/hi.py"], case_insensitive=True, - matches=["abc/foo/hi.py", r"ABC\foo/hi.py"], - nomatches=["abc/hi.py", "abc/hi.pyc", "ABC/foo/bar/hi.py", r"ABC\foo/bar/hi.py"], - ), - globs_to_regex_params( - ["abc/**/hi.py"], case_insensitive=True, - matches=[ - "abc/foo/hi.py", r"ABC\foo/hi.py", "abc/hi.py", "ABC/foo/bar/hi.py", - r"ABC\foo/bar/hi.py", - ], - nomatches=["abc/hi.pyc"], - ), - globs_to_regex_params( - ["abc/[a-f]*/hi.py"], case_insensitive=True, - matches=["abc/foo/hi.py", r"ABC\boo/hi.py"], - nomatches=[ - "abc/zoo/hi.py", "abc/hi.py", "abc/hi.pyc", "abc/foo/bar/hi.py", - r"abc\foo/bar/hi.py", - ], - ), - globs_to_regex_params( - ["abc/[a-f]/hi.py"], case_insensitive=True, - matches=["abc/f/hi.py", r"ABC\b/hi.py"], - nomatches=[ - "abc/foo/hi.py", "abc/zoo/hi.py", "abc/hi.py", "abc/hi.pyc", "abc/foo/bar/hi.py", - r"abc\foo/bar/hi.py", - ], - ), - globs_to_regex_params( - ["abc/"], case_insensitive=True, partial=True, - matches=["abc/foo/hi.py", "ABC/foo/bar/hi.py", r"ABC\foo/bar/hi.py"], - nomatches=["abcd/foo.py", "xabc/hi.py"], - ), - globs_to_regex_params( - ["*/foo"], case_insensitive=False, partial=True, - matches=["abc/foo/hi.py", "foo/hi.py", "abc/def/foo/hi.py"], - nomatches=["abc/xfoo/hi.py"], - ), - globs_to_regex_params( - ["*c/foo"], case_insensitive=False, partial=True, - matches=["abc/foo/hi.py"], - nomatches=["abc/xfoo/hi.py", "foo/hi.py", "def/abc/foo/hi.py"], - ), - 
globs_to_regex_params( - ["foo/x*"], case_insensitive=False, partial=True, - matches=["foo/x", "foo/xhi.py", "foo/x/hi.py"], - nomatches=[], - ), - globs_to_regex_params( - ["foo/x*"], case_insensitive=False, partial=False, - matches=["foo/x", "foo/xhi.py"], - nomatches=["foo/x/hi.py"], - ), - globs_to_regex_params( - ["**/foo"], - matches=["foo", "hello/foo", "hi/there/foo"], - nomatches=["foob", "hello/foob", "hello/Foo"], - ), - globs_to_regex_params( - ["a+b/foo*", "x{y}z/foo*"], - matches=["a+b/foo", "a+b/foobar", "x{y}z/foobar"], - nomatches=["aab/foo", "ab/foo", "xyz/foo"], - ), - ])), + list( + itertools.chain.from_iterable( + [ + globs_to_regex_params( + ["abc", "xyz"], + matches=["abc", "xyz", "sub/mod/abc"], + nomatches=[ + "ABC", + "xYz", + "abcx", + "xabc", + "axyz", + "xyza", + "sub/mod/abcd", + "sub/abc/more", + ], + ), + globs_to_regex_params( + ["abc", "xyz"], + case_insensitive=True, + matches=["abc", "xyz", "Abc", "XYZ", "AbC"], + nomatches=["abcx", "xabc", "axyz", "xyza"], + ), + globs_to_regex_params( + ["a*c", "x*z"], + matches=["abc", "xyz", "xYz", "azc", "xaz", "axyzc"], + nomatches=["ABC", "abcx", "xabc", "axyz", "xyza", "a/c"], + ), + globs_to_regex_params( + ["a?c", "x?z"], + matches=["abc", "xyz", "xYz", "azc", "xaz"], + nomatches=["ABC", "abcx", "xabc", "axyz", "xyza", "a/c"], + ), + globs_to_regex_params( + ["a??d"], + matches=["abcd", "azcd", "a12d"], + nomatches=["ABCD", "abcx", "axyz", "abcde"], + ), + globs_to_regex_params( + ["abc/hi.py"], + case_insensitive=True, + matches=["abc/hi.py", "ABC/hi.py", r"ABC\hi.py"], + nomatches=["abc_hi.py", "abc/hi.pyc"], + ), + globs_to_regex_params( + [r"abc\hi.py"], + case_insensitive=True, + matches=[r"abc\hi.py", r"ABC\hi.py", "abc/hi.py", "ABC/hi.py"], + nomatches=["abc_hi.py", "abc/hi.pyc"], + ), + globs_to_regex_params( + ["abc/*/hi.py"], + case_insensitive=True, + matches=["abc/foo/hi.py", r"ABC\foo/hi.py"], + nomatches=[ + "abc/hi.py", + "abc/hi.pyc", + "ABC/foo/bar/hi.py", + r"ABC\foo/bar/hi.py", + ], + ), + globs_to_regex_params( + ["abc/**/hi.py"], + case_insensitive=True, + matches=[ + "abc/foo/hi.py", + r"ABC\foo/hi.py", + "abc/hi.py", + "ABC/foo/bar/hi.py", + r"ABC\foo/bar/hi.py", + ], + nomatches=["abc/hi.pyc"], + ), + globs_to_regex_params( + ["abc/[a-f]*/hi.py"], + case_insensitive=True, + matches=["abc/foo/hi.py", r"ABC\boo/hi.py"], + nomatches=[ + "abc/zoo/hi.py", + "abc/hi.py", + "abc/hi.pyc", + "abc/foo/bar/hi.py", + r"abc\foo/bar/hi.py", + ], + ), + globs_to_regex_params( + ["abc/[a-f]/hi.py"], + case_insensitive=True, + matches=["abc/f/hi.py", r"ABC\b/hi.py"], + nomatches=[ + "abc/foo/hi.py", + "abc/zoo/hi.py", + "abc/hi.py", + "abc/hi.pyc", + "abc/foo/bar/hi.py", + r"abc\foo/bar/hi.py", + ], + ), + globs_to_regex_params( + ["abc/"], + case_insensitive=True, + partial=True, + matches=["abc/foo/hi.py", "ABC/foo/bar/hi.py", r"ABC\foo/bar/hi.py"], + nomatches=["abcd/foo.py", "xabc/hi.py"], + ), + globs_to_regex_params( + ["*/foo"], + case_insensitive=False, + partial=True, + matches=["abc/foo/hi.py", "foo/hi.py", "abc/def/foo/hi.py"], + nomatches=["abc/xfoo/hi.py"], + ), + globs_to_regex_params( + ["*c/foo"], + case_insensitive=False, + partial=True, + matches=["abc/foo/hi.py"], + nomatches=["abc/xfoo/hi.py", "foo/hi.py", "def/abc/foo/hi.py"], + ), + globs_to_regex_params( + ["foo/x*"], + case_insensitive=False, + partial=True, + matches=["foo/x", "foo/xhi.py", "foo/x/hi.py"], + nomatches=[], + ), + globs_to_regex_params( + ["foo/x*"], + case_insensitive=False, + partial=False, + matches=["foo/x", 
"foo/xhi.py"], + nomatches=["foo/x/hi.py"], + ), + globs_to_regex_params( + ["**/foo"], + matches=["foo", "hello/foo", "hi/there/foo"], + nomatches=["foob", "hello/foob", "hello/Foo"], + ), + globs_to_regex_params( + ["a+b/foo*", "x{y}z/foo*"], + matches=["a+b/foo", "a+b/foobar", "x{y}z/foobar"], + nomatches=["aab/foo", "ab/foo", "xyz/foo"], + ), + ] + ) + ), ) def test_globs_to_regex( patterns: Iterable[str], @@ -277,17 +341,20 @@ def test_globs_to_regex( assert bool(regex.match(text)) == result -@pytest.mark.parametrize("pattern, bad_word", [ - ("***/foo.py", "***"), - ("bar/***/foo.py", "***"), - ("*****/foo.py", "*****"), - ("Hello]there", "]"), - ("Hello[there", "["), - ("x/a**/b.py", "a**"), - ("x/abcd**/b.py", "abcd**"), - ("x/**a/b.py", "**a"), - ("x/**/**/b.py", "**/**"), -]) +@pytest.mark.parametrize( + "pattern, bad_word", + [ + ("***/foo.py", "***"), + ("bar/***/foo.py", "***"), + ("*****/foo.py", "*****"), + ("Hello]there", "]"), + ("Hello[there", "["), + ("x/a**/b.py", "a**"), + ("x/abcd**/b.py", "abcd**"), + ("x/**a/b.py", "**a"), + ("x/**/**/b.py", "**/**"), + ], +) def test_invalid_globs(pattern: str, bad_word: str) -> None: msg = f"File pattern can't include {bad_word!r}" with pytest.raises(ConfigError, match=re.escape(msg)): @@ -338,22 +405,22 @@ def test_tree_matcher(self) -> None: def test_module_matcher(self) -> None: matches_to_try = [ - ('test', True), - ('trash', False), - ('testing', False), - ('test.x', True), - ('test.x.y.z', True), - ('py', False), - ('py.t', False), - ('py.test', True), - ('py.testing', False), - ('py.test.buz', True), - ('py.test.buz.baz', True), - ('__main__', False), - ('mymain', True), - ('yourmain', False), + ("test", True), + ("trash", False), + ("testing", False), + ("test.x", True), + ("test.x.y.z", True), + ("py", False), + ("py.t", False), + ("py.test", True), + ("py.testing", False), + ("py.test.buz", True), + ("py.test.buz.baz", True), + ("__main__", False), + ("mymain", True), + ("yourmain", False), ] - modules = ['test', 'py.test', 'mymain'] + modules = ["test", "py.test", "mymain"] mm = ModuleMatcher(modules) assert mm.info() == modules for modulename, matches in matches_to_try: @@ -393,7 +460,7 @@ def test_glob_windows_paths(self) -> None: @pytest.fixture(params=[False, True], name="rel_yn") def relative_setting(request: pytest.FixtureRequest) -> bool: """Parameterized fixture to choose whether PathAliases is relative or not.""" - return request.param # type: ignore[no-any-return] + return request.param # type: ignore[no-any-return] class PathAliasesTest(CoverageTest): @@ -421,38 +488,38 @@ def assert_unchanged(self, aliases: PathAliases, inp: str, exists: bool = True) def test_noop(self, rel_yn: bool) -> None: aliases = PathAliases(relative=rel_yn) - self.assert_unchanged(aliases, '/ned/home/a.py') + self.assert_unchanged(aliases, "/ned/home/a.py") def test_nomatch(self, rel_yn: bool) -> None: aliases = PathAliases(relative=rel_yn) - aliases.add('/home/*/src', './mysrc') - self.assert_unchanged(aliases, '/home/foo/a.py') + aliases.add("/home/*/src", "./mysrc") + self.assert_unchanged(aliases, "/home/foo/a.py") def test_wildcard(self, rel_yn: bool) -> None: aliases = PathAliases(relative=rel_yn) - aliases.add('/ned/home/*/src', './mysrc') - self.assert_mapped(aliases, '/ned/home/foo/src/a.py', './mysrc/a.py') + aliases.add("/ned/home/*/src", "./mysrc") + self.assert_mapped(aliases, "/ned/home/foo/src/a.py", "./mysrc/a.py") aliases = PathAliases(relative=rel_yn) - aliases.add('/ned/home/*/src/', './mysrc') - 
self.assert_mapped(aliases, '/ned/home/foo/src/a.py', './mysrc/a.py') + aliases.add("/ned/home/*/src/", "./mysrc") + self.assert_mapped(aliases, "/ned/home/foo/src/a.py", "./mysrc/a.py") def test_no_accidental_match(self, rel_yn: bool) -> None: aliases = PathAliases(relative=rel_yn) - aliases.add('/home/*/src', './mysrc') - self.assert_unchanged(aliases, '/home/foo/srcetc') + aliases.add("/home/*/src", "./mysrc") + self.assert_unchanged(aliases, "/home/foo/srcetc") def test_no_map_if_not_exist(self, rel_yn: bool) -> None: aliases = PathAliases(relative=rel_yn) - aliases.add('/ned/home/*/src', './mysrc') - self.assert_unchanged(aliases, '/ned/home/foo/src/a.py', exists=False) - self.assert_unchanged(aliases, 'foo/src/a.py', exists=False) + aliases.add("/ned/home/*/src", "./mysrc") + self.assert_unchanged(aliases, "/ned/home/foo/src/a.py", exists=False) + self.assert_unchanged(aliases, "foo/src/a.py", exists=False) def test_no_dotslash(self, rel_yn: bool) -> None: # The result shouldn't start with "./" if the map result didn't. aliases = PathAliases(relative=rel_yn) - aliases.add('*/project', '.') - self.assert_mapped(aliases, '/ned/home/project/src/a.py', os_sep('src/a.py')) + aliases.add("*/project", ".") + self.assert_mapped(aliases, "/ned/home/project/src/a.py", os_sep("src/a.py")) def test_relative_pattern(self) -> None: aliases = PathAliases(relative=True) @@ -467,40 +534,43 @@ def test_multiple_patterns(self, rel_yn: bool) -> None: # also test the debugfn... msgs: list[str] = [] aliases = PathAliases(debugfn=msgs.append, relative=rel_yn) - aliases.add('/home/*/src', './mysrc') - aliases.add('/lib/*/libsrc', './mylib') - self.assert_mapped(aliases, '/home/foo/src/a.py', './mysrc/a.py') - self.assert_mapped(aliases, '/lib/foo/libsrc/a.py', './mylib/a.py') + aliases.add("/home/*/src", "./mysrc") + aliases.add("/lib/*/libsrc", "./mylib") + self.assert_mapped(aliases, "/home/foo/src/a.py", "./mysrc/a.py") + self.assert_mapped(aliases, "/lib/foo/libsrc/a.py", "./mylib/a.py") if rel_yn: assert msgs == [ "Aliases (relative=True):", - " Rule: '/home/*/src' -> './mysrc/' using regex " + - "'[/\\\\\\\\]home[/\\\\\\\\][^/\\\\\\\\]*[/\\\\\\\\]src[/\\\\\\\\]'", - " Rule: '/lib/*/libsrc' -> './mylib/' using regex " + - "'[/\\\\\\\\]lib[/\\\\\\\\][^/\\\\\\\\]*[/\\\\\\\\]libsrc[/\\\\\\\\]'", - "Matched path '/home/foo/src/a.py' to rule '/home/*/src' -> './mysrc/', " + - "producing './mysrc/a.py'", - "Matched path '/lib/foo/libsrc/a.py' to rule '/lib/*/libsrc' -> './mylib/', " + - "producing './mylib/a.py'", + " Rule: '/home/*/src' -> './mysrc/' using regex " + + "'[/\\\\\\\\]home[/\\\\\\\\][^/\\\\\\\\]*[/\\\\\\\\]src[/\\\\\\\\]'", + " Rule: '/lib/*/libsrc' -> './mylib/' using regex " + + "'[/\\\\\\\\]lib[/\\\\\\\\][^/\\\\\\\\]*[/\\\\\\\\]libsrc[/\\\\\\\\]'", + "Matched path '/home/foo/src/a.py' to rule '/home/*/src' -> './mysrc/', " + + "producing './mysrc/a.py'", + "Matched path '/lib/foo/libsrc/a.py' to rule '/lib/*/libsrc' -> './mylib/', " + + "producing './mylib/a.py'", ] else: assert msgs == [ "Aliases (relative=False):", - " Rule: '/home/*/src' -> './mysrc/' using regex " + - "'[/\\\\\\\\]home[/\\\\\\\\][^/\\\\\\\\]*[/\\\\\\\\]src[/\\\\\\\\]'", - " Rule: '/lib/*/libsrc' -> './mylib/' using regex " + - "'[/\\\\\\\\]lib[/\\\\\\\\][^/\\\\\\\\]*[/\\\\\\\\]libsrc[/\\\\\\\\]'", - "Matched path '/home/foo/src/a.py' to rule '/home/*/src' -> './mysrc/', " + - f"producing {files.canonical_filename('./mysrc/a.py')!r}", - "Matched path '/lib/foo/libsrc/a.py' to rule '/lib/*/libsrc' -> './mylib/', " + - 
f"producing {files.canonical_filename('./mylib/a.py')!r}", + " Rule: '/home/*/src' -> './mysrc/' using regex " + + "'[/\\\\\\\\]home[/\\\\\\\\][^/\\\\\\\\]*[/\\\\\\\\]src[/\\\\\\\\]'", + " Rule: '/lib/*/libsrc' -> './mylib/' using regex " + + "'[/\\\\\\\\]lib[/\\\\\\\\][^/\\\\\\\\]*[/\\\\\\\\]libsrc[/\\\\\\\\]'", + "Matched path '/home/foo/src/a.py' to rule '/home/*/src' -> './mysrc/', " + + f"producing {files.canonical_filename('./mysrc/a.py')!r}", + "Matched path '/lib/foo/libsrc/a.py' to rule '/lib/*/libsrc' -> './mylib/', " + + f"producing {files.canonical_filename('./mylib/a.py')!r}", ] - @pytest.mark.parametrize("badpat", [ - "/ned/home/*", - "/ned/home/*/", - "/ned/home/*/*/", - ]) + @pytest.mark.parametrize( + "badpat", + [ + "/ned/home/*", + "/ned/home/*/", + "/ned/home/*/*/", + ], + ) def test_cant_have_wildcard_at_end(self, badpat: str) -> None: aliases = PathAliases() msg = "Pattern must not end with wildcards." @@ -509,24 +579,24 @@ def test_cant_have_wildcard_at_end(self, badpat: str) -> None: def test_no_accidental_munging(self) -> None: aliases = PathAliases() - aliases.add(r'c:\Zoo\boo', 'src/') - aliases.add('/home/ned$', 'src/') - self.assert_mapped(aliases, r'c:\Zoo\boo\foo.py', 'src/foo.py') - self.assert_mapped(aliases, r'/home/ned$/foo.py', 'src/foo.py') + aliases.add(r"c:\Zoo\boo", "src/") + aliases.add("/home/ned$", "src/") + self.assert_mapped(aliases, r"c:\Zoo\boo\foo.py", "src/foo.py") + self.assert_mapped(aliases, r"/home/ned$/foo.py", "src/foo.py") def test_paths_are_os_corrected(self, rel_yn: bool) -> None: aliases = PathAliases(relative=rel_yn) - aliases.add('/home/ned/*/src', './mysrc') - aliases.add(r'c:\ned\src', './mysrc') - self.assert_mapped(aliases, r'C:\Ned\src\sub\a.py', './mysrc/sub/a.py') + aliases.add("/home/ned/*/src", "./mysrc") + aliases.add(r"c:\ned\src", "./mysrc") + self.assert_mapped(aliases, r"C:\Ned\src\sub\a.py", "./mysrc/sub/a.py") aliases = PathAliases(relative=rel_yn) - aliases.add('/home/ned/*/src', r'.\mysrc') - aliases.add(r'c:\ned\src', r'.\mysrc') + aliases.add("/home/ned/*/src", r".\mysrc") + aliases.add(r"c:\ned\src", r".\mysrc") self.assert_mapped( aliases, - r'/home/ned/foo/src/sub/a.py', - r'.\mysrc\sub\a.py', + r"/home/ned/foo/src/sub/a.py", + r".\mysrc\sub\a.py", ) # Try the paths in both orders. 
@@ -604,16 +674,16 @@ def test_implicit_relative_linux_on_windows(self) -> None: def test_multiple_wildcard(self, rel_yn: bool) -> None: aliases = PathAliases(relative=rel_yn) - aliases.add('/home/jenkins/*/a/*/b/*/django', './django') + aliases.add("/home/jenkins/*/a/*/b/*/django", "./django") self.assert_mapped( aliases, - '/home/jenkins/xx/a/yy/b/zz/django/foo/bar.py', - './django/foo/bar.py', + "/home/jenkins/xx/a/yy/b/zz/django/foo/bar.py", + "./django/foo/bar.py", ) def test_windows_root_paths(self, rel_yn: bool) -> None: aliases = PathAliases(relative=rel_yn) - aliases.add('X:\\', '/tmp/src') + aliases.add("X:\\", "/tmp/src") self.assert_mapped( aliases, "X:\\a\\file.py", @@ -627,10 +697,10 @@ def test_windows_root_paths(self, rel_yn: bool) -> None: def test_leading_wildcard(self, rel_yn: bool) -> None: aliases = PathAliases(relative=rel_yn) - aliases.add('*/d1', './mysrc1') - aliases.add('*/d2', './mysrc2') - self.assert_mapped(aliases, '/foo/bar/d1/x.py', './mysrc1/x.py') - self.assert_mapped(aliases, '/foo/bar/d2/y.py', './mysrc2/y.py') + aliases.add("*/d1", "./mysrc1") + aliases.add("*/d2", "./mysrc2") + self.assert_mapped(aliases, "/foo/bar/d1/x.py", "./mysrc1/x.py") + self.assert_mapped(aliases, "/foo/bar/d2/y.py", "./mysrc2/y.py") @pytest.mark.parametrize("dirname", [".", "..", "../other", "/"]) def test_dot(self, dirname: str) -> None: @@ -639,13 +709,13 @@ def test_dot(self, dirname: str) -> None: # and I'm not sure how it should work on Windows, so skip it. pytest.skip("Don't know how to handle root on Windows") aliases = PathAliases() - aliases.add(dirname, '/the/source') - the_file = os.path.join(dirname, 'a.py') + aliases.add(dirname, "/the/source") + the_file = os.path.join(dirname, "a.py") the_file = os.path.expanduser(the_file) the_file = os.path.abspath(os.path.realpath(the_file)) - assert '~' not in the_file # to be sure the test is pure. - self.assert_mapped(aliases, the_file, '/the/source/a.py') + assert "~" not in the_file # to be sure the test is pure. 
+ self.assert_mapped(aliases, the_file, "/the/source/a.py") class PathAliasesRealFilesTest(CoverageTest): @@ -667,35 +737,45 @@ class FindPythonFilesTest(CoverageTest): def test_find_python_files(self) -> None: self.make_file("sub/a.py") self.make_file("sub/b.py") - self.make_file("sub/x.c") # nope: not .py + self.make_file("sub/x.c") # nope: not .py self.make_file("sub/ssub/__init__.py") self.make_file("sub/ssub/s.py") - self.make_file("sub/ssub/~s.py") # nope: editor effluvia - self.make_file("sub/lab/exp.py") # nope: no __init__.py + self.make_file("sub/ssub/~s.py") # nope: editor effluvia + self.make_file("sub/lab/exp.py") # nope: no __init__.py self.make_file("sub/windows.pyw") py_files = set(find_python_files("sub", include_namespace_packages=False)) - self.assert_same_files(py_files, [ - "sub/a.py", "sub/b.py", - "sub/ssub/__init__.py", "sub/ssub/s.py", - "sub/windows.pyw", - ]) + self.assert_same_files( + py_files, + [ + "sub/a.py", + "sub/b.py", + "sub/ssub/__init__.py", + "sub/ssub/s.py", + "sub/windows.pyw", + ], + ) def test_find_python_files_include_namespace_packages(self) -> None: self.make_file("sub/a.py") self.make_file("sub/b.py") - self.make_file("sub/x.c") # nope: not .py + self.make_file("sub/x.c") # nope: not .py self.make_file("sub/ssub/__init__.py") self.make_file("sub/ssub/s.py") - self.make_file("sub/ssub/~s.py") # nope: editor effluvia + self.make_file("sub/ssub/~s.py") # nope: editor effluvia self.make_file("sub/lab/exp.py") self.make_file("sub/windows.pyw") py_files = set(find_python_files("sub", include_namespace_packages=True)) - self.assert_same_files(py_files, [ - "sub/a.py", "sub/b.py", - "sub/ssub/__init__.py", "sub/ssub/s.py", - "sub/lab/exp.py", - "sub/windows.pyw", - ]) + self.assert_same_files( + py_files, + [ + "sub/a.py", + "sub/b.py", + "sub/ssub/__init__.py", + "sub/ssub/s.py", + "sub/lab/exp.py", + "sub/windows.pyw", + ], + ) @pytest.mark.skipif(not env.WINDOWS, reason="Only need to run Windows tests on Windows.") @@ -705,4 +785,4 @@ class WindowsFileTest(CoverageTest): run_in_temp_dir = False def test_actual_path(self) -> None: - assert actual_path(r'c:\Windows') == actual_path(r'C:\wINDOWS') + assert actual_path(r"c:\Windows") == actual_path(r"C:\wINDOWS") diff --git a/tests/test_goldtest.py b/tests/test_goldtest.py index 57df48829..f4fdd6ca9 100644 --- a/tests/test_goldtest.py +++ b/tests/test_goldtest.py @@ -31,14 +31,16 @@ SCRUBS = [ # Numbers don't matter when comparing. - (r'\d+', 'D'), - (r'G\w+', 'Gxxx'), + (r"\d+", "D"), + (r"G\w+", "Gxxx"), ] + def path_regex(path: str) -> str: """Convert a file path into a regex that will match that path on any OS.""" return re.sub(r"[/\\]", r"[/\\\\]", path.replace(".", "[.]")) + ACTUAL_DIR = os.path.join(TESTS_DIR, "actual/testing") ACTUAL_GETTY_FILE = os.path.join(ACTUAL_DIR, "getty/gettysburg.txt") GOLD_GETTY_FILE = os.path.join(TESTS_DIR, "gold/testing/getty/gettysburg.txt") @@ -47,6 +49,7 @@ def path_regex(path: str) -> str: GOLD_PATH_RX = path_regex("/tests/gold/testing/getty/gettysburg.txt") OUT_PATH_RX = path_regex("out/gettysburg.txt") + @pytest.mark.xdist_group(name="compare_test") class CompareTest(CoverageTest): """Tests of goldtest.py:compare()""" @@ -64,7 +67,7 @@ def test_bad(self) -> None: self.make_file("out/gettysburg.txt", BAD_GETTY) # compare() raises an assertion. 
- msg = fr"Files differ: .*{GOLD_PATH_RX} != {OUT_PATH_RX}" + msg = rf"Files differ: .*{GOLD_PATH_RX} != {OUT_PATH_RX}" with pytest.raises(AssertionError, match=msg): compare(gold_path("testing/getty"), "out", scrubs=SCRUBS) @@ -72,12 +75,11 @@ def test_bad(self) -> None: stdout = self.stdout() assert "- Four score" in stdout assert "+ Five score" in stdout - assert re_line(fr"^:::: diff '.*{GOLD_PATH_RX}' and '{OUT_PATH_RX}'", stdout) - assert re_line(fr"^:::: end diff '.*{GOLD_PATH_RX}' and '{OUT_PATH_RX}'", stdout) - assert ( - os_sep(f"Saved actual output to '{ACTUAL_GETTY_FILE}': see tests/gold/README.rst") - in os_sep(stdout) - ) + assert re_line(rf"^:::: diff '.*{GOLD_PATH_RX}' and '{OUT_PATH_RX}'", stdout) + assert re_line(rf"^:::: end diff '.*{GOLD_PATH_RX}' and '{OUT_PATH_RX}'", stdout) + assert os_sep( + f"Saved actual output to '{ACTUAL_GETTY_FILE}': see tests/gold/README.rst" + ) in os_sep(stdout) assert " D/D/D, Gxxx, Pennsylvania" in stdout # The actual file was saved. @@ -90,7 +92,7 @@ def test_good_needs_scrubs(self) -> None: self.make_file("out/gettysburg.txt", GOOD_GETTY) # compare() raises an assertion. - msg = fr"Files differ: .*{GOLD_PATH_RX} != {OUT_PATH_RX}" + msg = rf"Files differ: .*{GOLD_PATH_RX} != {OUT_PATH_RX}" with pytest.raises(AssertionError, match=msg): compare(gold_path("testing/getty"), "out") @@ -116,30 +118,36 @@ def test_actual_extra(self) -> None: compare(gold_path("testing/getty"), "out", file_pattern="*.txt", scrubs=SCRUBS) def test_xml_good(self) -> None: - self.make_file("out/output.xml", """\ + self.make_file( + "out/output.xml", + """\ Goodie - """) + """, + ) compare(gold_path("testing/xml"), "out", scrubs=SCRUBS) def test_xml_bad(self) -> None: - self.make_file("out/output.xml", """\ + self.make_file( + "out/output.xml", + """\ Goodbye - """) + """, + ) # compare() raises an exception. 
gold_rx = path_regex(gold_path("testing/xml/output.xml")) out_rx = path_regex("out/output.xml") - msg = fr"Files differ: .*{gold_rx} != {out_rx}" + msg = rf"Files differ: .*{gold_rx} != {out_rx}" with pytest.raises(AssertionError, match=msg): compare(gold_path("testing/xml"), "out", scrubs=SCRUBS) @@ -157,24 +165,24 @@ class ContainsTest(CoverageTest): def test_contains(self) -> None: contains(GOLD_GETTY_FILE, "Four", "fathers", "dedicated") - msg = fr"Missing content in {GOLD_GETTY_FILE_RX}: 'xyzzy'" + msg = rf"Missing content in {GOLD_GETTY_FILE_RX}: 'xyzzy'" with pytest.raises(AssertionError, match=msg): contains(GOLD_GETTY_FILE, "Four", "fathers", "xyzzy", "dedicated") def test_contains_rx(self) -> None: contains_rx(GOLD_GETTY_FILE, r"Fo.r", r"f[abc]thers", "dedi[cdef]ated") - msg = fr"Missing regex in {GOLD_GETTY_FILE_RX}: r'm\[opq\]thers'" + msg = rf"Missing regex in {GOLD_GETTY_FILE_RX}: r'm\[opq\]thers'" with pytest.raises(AssertionError, match=msg): contains_rx(GOLD_GETTY_FILE, r"Fo.r", r"m[opq]thers") def test_contains_any(self) -> None: contains_any(GOLD_GETTY_FILE, "Five", "Four", "Three") - msg = fr"Missing content in {GOLD_GETTY_FILE_RX}: 'One' \[1 of 3\]" + msg = rf"Missing content in {GOLD_GETTY_FILE_RX}: 'One' \[1 of 3\]" with pytest.raises(AssertionError, match=msg): contains_any(GOLD_GETTY_FILE, "One", "Two", "Three") def test_doesnt_contain(self) -> None: doesnt_contain(GOLD_GETTY_FILE, "One", "Two", "Three") - msg = fr"Forbidden content in {GOLD_GETTY_FILE_RX}: 'Four'" + msg = rf"Forbidden content in {GOLD_GETTY_FILE_RX}: 'Four'" with pytest.raises(AssertionError, match=msg): doesnt_contain(GOLD_GETTY_FILE, "Three", "Four", "Five") diff --git a/tests/test_html.py b/tests/test_html.py index a8ad3925c..153a7058d 100644 --- a/tests/test_html.py +++ b/tests/test_html.py @@ -40,20 +40,29 @@ class HtmlTestHelpers(CoverageTest): def create_initial_files(self) -> None: """Create the source files we need to run these tests.""" - self.make_file("main_file.py", """\ + self.make_file( + "main_file.py", + """\ import helper1, helper2 helper1.func1(12) helper2.func2(12) - """) - self.make_file("helper1.py", """\ + """, + ) + self.make_file( + "helper1.py", + """\ def func1(x): if x % 2: print("odd") - """) - self.make_file("helper2.py", """\ + """, + ) + self.make_file( + "helper2.py", + """\ def func2(x): print("x is %d" % x) - """) + """, + ) def run_coverage( self, @@ -106,7 +115,7 @@ def assert_correct_timestamp(self, html: str) -> None: timestamp_pat = r"created at (\d{4})-(\d{2})-(\d{2}) (\d{2}):(\d{2})" m = re.search(timestamp_pat, html) assert m, "Didn't find a time stamp!" - timestamp = datetime.datetime(*[int(v) for v in m.groups()]) # type: ignore[arg-type] + timestamp = datetime.datetime(*[int(v) for v in m.groups()]) # type: ignore[arg-type] # The time stamp only records the minute, so the delta could be from # 12:00 to 12:01:59, or two minutes. self.assert_recent_datetime( @@ -126,13 +135,13 @@ def assert_valid_hrefs(self, directory: str = "htmlcov") -> None: html = fhtml.read() for href in re.findall(r""" href=['"]([^'"]*)['"]""", html): if href.startswith("#"): - assert re.search(fr""" id=['"]{href[1:]}['"]""", html), ( + assert re.search(rf""" id=['"]{href[1:]}['"]""", html), ( f"Fragment {href!r} in {fname} has no anchor" ) continue if "://" in href: continue - href = href.partition("#")[0] # ignore fragment in URLs. + href = href.partition("#")[0] # ignore fragment in URLs. 
hrefs[href].add(fname) for href, sources in hrefs.items(): assert os.path.exists(f"{directory}/{href}"), ( @@ -145,6 +154,7 @@ class HtmlReportParser(HTMLParser): Assertions are made about the structure we expect. """ + def __init__(self) -> None: super().__init__() self.lines: list[list[str]] = [] @@ -179,13 +189,14 @@ def text(self) -> list[str]: class FileWriteTracker: """A fake object to track how `open` is used to write files.""" + def __init__(self, written: set[str]) -> None: self.written = written def open(self, filename: str, mode: str = "r", encoding: str | None = None) -> IO[str]: """Be just like `open`, but write written file names to `self.written`.""" if mode.startswith("w"): - self.written.add(filename.replace('\\', '/')) + self.written.add(filename.replace("\\", "/")) return open(filename, mode, encoding=encoding) @@ -215,7 +226,7 @@ def run_coverage( """ covargs = covargs or {} - covargs['source'] = "." + covargs["source"] = "." self.files_written = set() mock_open = FileWriteTracker(self.files_written).open with mock.patch("coverage.html.open", mock_open): @@ -236,7 +247,7 @@ def assert_htmlcov_files_exist(self) -> None: files = os.listdir("htmlcov") for static in statics: base, ext = os.path.splitext(static) - busted_file_pattern = fr"{base}_cb_\w{{8}}{ext}" + busted_file_pattern = rf"{base}_cb_\w{{8}}{ext}" matches = [m for f in files if (m := re.fullmatch(busted_file_pattern, f))] assert len(matches) == 1, f"Found {len(matches)} files for {static}" @@ -254,11 +265,14 @@ def test_html_delta_from_source_change(self) -> None: index1 = self.get_html_index_content() # Now change a file (but only in a comment) and do it again. - self.make_file("helper1.py", """\ + self.make_file( + "helper1.py", + """\ def func1(x): # A nice function if x % 2: print("odd") - """) + """, + ) self.run_coverage() @@ -281,11 +295,14 @@ def test_html_delta_from_coverage_change(self) -> None: # Now change a file and do it again. main_file is different, and calls # helper1 differently. - self.make_file("main_file.py", """\ + self.make_file( + "main_file.py", + """\ import helper1, helper2 helper1.func1(23) helper2.func2(23) - """) + """, + ) self.run_coverage() @@ -304,7 +321,7 @@ def test_html_delta_from_settings_change(self) -> None: self.run_coverage(covargs=dict(omit=[])) index1 = self.get_html_index_content() - self.run_coverage(covargs=dict(omit=['xyzzy*'])) + self.run_coverage(covargs=dict(omit=["xyzzy*"])) # All the files have been reported again. self.assert_htmlcov_files_exist() @@ -345,12 +362,15 @@ def test_file_becomes_100(self) -> None: self.run_coverage() # Now change a file and do it again - self.make_file("main_file.py", """\ + self.make_file( + "main_file.py", + """\ import helper1, helper2 # helper1 is now 100% helper1.func1(12) helper1.func1(23) - """) + """, + ) self.run_coverage(htmlargs=dict(skip_covered=True)) @@ -364,8 +384,8 @@ def test_status_format_change(self) -> None: with open("htmlcov/status.json", encoding="utf-8") as status_json: status_data = json.load(status_json) - assert status_data['format'] == 5 - status_data['format'] = 99 + assert status_data["format"] == 5 + status_data["format"] = 99 with open("htmlcov/status.json", "w", encoding="utf-8") as status_json: json.dump(status_data, status_json) @@ -425,8 +445,8 @@ def test_title_set_in_args(self) -> None: self.run_coverage(htmlargs=dict(title="ÂĢĪ„ÎąĐ‘ĐŦâ„“ĪƒÂģ & stÃŧff!")) index = self.get_html_index_content() expected = ( - "«ταБЬℓσ» " + - "& stüff!" + "«ταБЬℓσ» " + + "& stüff!" 
) assert expected in index assert "&lt;h1&gt;
«ταБЬℓσ» & stüff!:" in index @@ -505,10 +525,13 @@ def test_decode_error(self) -> None: # file after running. self.make_file("main.py", "import sub.not_ascii") self.make_file("sub/__init__.py") - self.make_file("sub/not_ascii.py", """\ + self.make_file( + "sub/not_ascii.py", + """\ # coding: utf-8 a = 1 # Isn't this great?! - """) + """, + ) cov = coverage.Coverage() self.start_import_stop(cov, "main") @@ -544,7 +567,9 @@ def test_splitlines_special_chars(self) -> None: # I'm not exactly sure why we need the "a" strings here, but the old # code wasn't failing without them. - self.make_file("splitlines_is_weird.py", """\ + self.make_file( + "splitlines_is_weird.py", + """\ test = { "0b": ["\x0b0"], "a1": "this is line 2", "0c": ["\x0c0"], "a2": "this is line 3", @@ -556,7 +581,8 @@ def test_splitlines_special_chars(self) -> None: "2029": ["\u20290"], "a8": "this is line 9", } DONE = 1 - """) + """, + ) cov = coverage.Coverage() self.start_import_stop(cov, "splitlines_is_weird") cov.html_report() @@ -579,7 +605,7 @@ def test_missing_source_file_incorrect_message(self) -> None: self.make_file("sub/__init__.py", "") self.make_file("sub/another.py", "print('another')\n") cov = coverage.Coverage() - self.start_import_stop(cov, 'thefile') + self.start_import_stop(cov, "thefile") os.remove("sub/another.py") missing_file = os.path.join(self.temp_dir, "sub", "another.py") @@ -594,10 +620,12 @@ def test_extensionless_file_collides_with_extension(self) -> None: # https://github.com/nedbat/coveragepy/issues/69 self.make_file("program", "import program\n") self.make_file("program.py", "a = 1\n") - self.make_data_file(lines={ - abs_file("program"): [1], - abs_file("program.py"): [1], - }) + self.make_data_file( + lines={ + abs_file("program"): [1], + abs_file("program.py"): [1], + } + ) cov = coverage.Coverage() cov.load() cov.html_report() @@ -619,23 +647,29 @@ def test_reporting_on_unmeasured_file(self) -> None: # measured at all. 
https://github.com/nedbat/coveragepy/issues/403 self.create_initial_files() self.make_file("other.py", "a = 1\n") - self.run_coverage(htmlargs=dict(morfs=['other.py'])) + self.run_coverage(htmlargs=dict(morfs=["other.py"])) self.assert_exists("htmlcov/index.html") self.assert_exists("htmlcov/other_py.html") def make_main_and_not_covered(self) -> None: """Helper to create files for skip_covered scenarios.""" - self.make_file("main_file.py", """\ + self.make_file( + "main_file.py", + """\ import not_covered def normal(): print("z") normal() - """) - self.make_file("not_covered.py", """\ + """, + ) + self.make_file( + "not_covered.py", + """\ def not_covered(): print("n") - """) + """, + ) def test_report_skip_covered(self) -> None: self.make_main_and_not_covered() @@ -662,11 +696,14 @@ def test_report_skip_covered_branches(self) -> None: self.assert_exists("htmlcov/not_covered_py.html") def test_report_skip_covered_100(self) -> None: - self.make_file("main_file.py", """\ + self.make_file( + "main_file.py", + """\ def normal(): print("z") normal() - """) + """, + ) res = self.run_coverage(covargs=dict(source="."), htmlargs=dict(skip_covered=True)) assert res == 100.0 self.assert_doesnt_exist("htmlcov/main_file_py.html") @@ -676,13 +713,16 @@ def normal(): self.assert_doesnt_exist("htmlcov/class_index.html") def test_report_skip_covered_100_functions(self) -> None: - self.make_file("main_file.py", """\ + self.make_file( + "main_file.py", + """\ def normal(): print("z") def abnormal(): print("a") normal() - """) + """, + ) res = self.run_coverage(covargs=dict(source="."), htmlargs=dict(skip_covered=True)) assert res == 80.0 self.assert_exists("htmlcov/main_file_py.html") @@ -694,13 +734,16 @@ def abnormal(): def make_init_and_main(self) -> None: """Helper to create files for skip_empty scenarios.""" self.make_file("submodule/__init__.py", "") - self.make_file("main_file.py", """\ + self.make_file( + "main_file.py", + """\ import submodule def normal(): print("z") normal() - """) + """, + ) def test_report_skip_empty(self) -> None: self.make_init_and_main() @@ -736,25 +779,25 @@ def compare_html( extra_scrubs: list[tuple[str, str]] | None = None, ) -> None: """Specialized compare function for our HTML files.""" - __tracebackhide__ = True # pytest, please don't show me this function. + __tracebackhide__ = True # pytest, please don't show me this function. scrubs = [ - (r'/coverage\.readthedocs\.io/?[-.\w/]*', '/coverage.readthedocs.io/VER'), - (r'coverage\.py v[\d.abcdev]+', 'coverage.py vVER'), - (r'created at \d\d\d\d-\d\d-\d\d \d\d:\d\d [-+]\d\d\d\d', 'created at DATE'), - (r'created at \d\d\d\d-\d\d-\d\d \d\d:\d\d', 'created at DATE'), + (r"/coverage\.readthedocs\.io/?[-.\w/]*", "/coverage.readthedocs.io/VER"), + (r"coverage\.py v[\d.abcdev]+", "coverage.py vVER"), + (r"created at \d\d\d\d-\d\d-\d\d \d\d:\d\d [-+]\d\d\d\d", "created at DATE"), + (r"created at \d\d\d\d-\d\d-\d\d \d\d:\d\d", "created at DATE"), # Static files have cache busting. - (r'_cb_\w{8}\.', '_CB.'), + (r"_cb_\w{8}\.", "_CB."), # Occasionally an absolute path is in the HTML report. - (filepath_to_regex(TESTS_DIR), 'TESTS_DIR'), - (filepath_to_regex(flat_rootname(str(TESTS_DIR))), '_TESTS_DIR'), + (filepath_to_regex(TESTS_DIR), "TESTS_DIR"), + (filepath_to_regex(flat_rootname(str(TESTS_DIR))), "_TESTS_DIR"), # The temp dir the tests make. 
- (filepath_to_regex(os.getcwd()), 'TEST_TMPDIR'), - (filepath_to_regex(flat_rootname(str(os.getcwd()))), '_TEST_TMPDIR'), - (filepath_to_regex(abs_file(os.getcwd())), 'TEST_TMPDIR'), - (filepath_to_regex(flat_rootname(str(abs_file(os.getcwd())))), '_TEST_TMPDIR'), - (r'/private/var/[\w/]+/pytest-of-\w+/pytest-\d+/(popen-gw\d+/)?t\d+', 'TEST_TMPDIR'), + (filepath_to_regex(os.getcwd()), "TEST_TMPDIR"), + (filepath_to_regex(flat_rootname(str(os.getcwd()))), "_TEST_TMPDIR"), + (filepath_to_regex(abs_file(os.getcwd())), "TEST_TMPDIR"), + (filepath_to_regex(flat_rootname(str(abs_file(os.getcwd())))), "_TEST_TMPDIR"), + (r"/private/var/[\w/]+/pytest-of-\w+/pytest-\d+/(popen-gw\d+/)?t\d+", "TEST_TMPDIR"), # If the gold files were created on Windows, we need to scrub Windows paths also: - (r'[A-Z]:\\Users\\[\w\\]+\\pytest-of-\w+\\pytest-\d+\\(popen-gw\d+\\)?t\d+', 'TEST_TMPDIR'), + (r"[A-Z]:\\Users\\[\w\\]+\\pytest-of-\w+\\pytest-\d+\\(popen-gw\d+\\)?t\d+", "TEST_TMPDIR"), ] if extra_scrubs: scrubs += extra_scrubs @@ -778,25 +821,32 @@ class HtmlGoldTest(HtmlTestHelpers, CoverageTest): """Tests of HTML reporting that use gold files.""" def test_a(self) -> None: - self.make_file("a.py", """\ + self.make_file( + "a.py", + """\ if 1 < 2: # Needed a < to look at HTML entities. a = 3 else: a = 4 - """) + """, + ) cov = coverage.Coverage() a = self.start_import_stop(cov, "a") - cov.html_report(a, directory='out/a') + cov.html_report(a, directory="out/a") compare_html(gold_path("html/a"), "out/a") contains( "out/a/a_py.html", - ('if 1 ' + - '< 2'), - (' a ' + - '= 3'), + ( + 'if 1 ' + + '< 2' + ), + ( + ' a ' + + '= 3' + ), '67%', ) contains( @@ -807,7 +857,9 @@ def test_a(self) -> None: ) def test_b_branch(self) -> None: - self.make_file("b.py", """\ + self.make_file( + "b.py", + """\ def one(x): # This will be a branch that misses the else. if x < 2: @@ -835,7 +887,8 @@ def three(): pass three() - """) + """, + ) cov = coverage.Coverage(branch=True) b = self.start_import_stop(cov, "b") @@ -843,21 +896,30 @@ def three(): compare_html(gold_path("html/b_branch"), "out/b_branch") contains( "out/b_branch/b_py.html", - ('if x ' + - '< 2'), - (' a = ' + - '3'), + ( + 'if x ' + + '< 2' + ), + ( + ' a = ' + + '3' + ), '70%', - - ('3 ↛ 6' + - 'line 3 didn\'t jump to line 6 ' + - 'because the condition on line 3 was always true'), - ('12 ↛ exit' + - 'line 12 didn\'t return from function \'two\' ' + - 'because the condition on line 12 was always true'), - ('20 ↛ anywhere' + - 'line 20 didn\'t jump anywhere: ' + - 'it always raised an exception.'), + ( + '3 ↛ 6' + + 'line 3 didn\'t jump to line 6 ' + + "because the condition on line 3 was always true" + ), + ( + '12 ↛ exit' + + "line 12 didn't return from function 'two' " + + "because the condition on line 12 was always true" + ), + ( + '20 ↛ anywhere' + + 'line 20 didn\'t jump anywhere: ' + + "it always raised an exception." + ), ) contains( "out/b_branch/index.html", @@ -867,13 +929,16 @@ def three(): ) def test_bom(self) -> None: - self.make_file("bom.py", bytes=b"""\ + self.make_file( + "bom.py", + bytes=b"""\ \xef\xbb\xbf# A Python source file in utf-8, with BOM. 
math = "3\xc3\x974 = 12, \xc3\xb72 = 6\xc2\xb10" assert len(math) == 18 assert len(math.encode('utf-8')) == 21 -""".replace(b"\n", b"\r\n")) +""".replace(b"\n", b"\r\n"), + ) # It's important that the source file really have a BOM, which can # get lost, so check that it's really there, and that we have \r\n @@ -894,13 +959,16 @@ def test_bom(self) -> None: ) def test_isolatin1(self) -> None: - self.make_file("isolatin1.py", bytes=b"""\ + self.make_file( + "isolatin1.py", + bytes=b"""\ # -*- coding: iso8859-1 -*- # A Python source file in another encoding. math = "3\xd74 = 12, \xf72 = 6\xb10" assert len(math) == 18 -""") +""", + ) cov = coverage.Coverage() isolatin1 = self.start_import_stop(cov, "isolatin1") @@ -914,7 +982,9 @@ def test_isolatin1(self) -> None: def make_main_etc(self) -> None: """Make main.py and m1-m3.py for other tests.""" - self.make_file("main.py", """\ + self.make_file( + "main.py", + """\ import m1 import m2 import m3 @@ -925,19 +995,29 @@ def make_main_etc(self) -> None: assert m1.m1a == 1 assert m2.m2a == 1 assert m3.m3a == 1 - """) - self.make_file("m1.py", """\ + """, + ) + self.make_file( + "m1.py", + """\ m1a = 1 m1b = 2 - """) - self.make_file("m2.py", """\ + """, + ) + self.make_file( + "m2.py", + """\ m2a = 1 m2b = 2 - """) - self.make_file("m3.py", """\ + """, + ) + self.make_file( + "m3.py", + """\ m3a = 1 m3b = 2 - """) + """, + ) def test_omit_1(self) -> None: self.make_main_etc() @@ -962,10 +1042,13 @@ def test_omit_3(self) -> None: def test_omit_4(self) -> None: self.make_main_etc() - self.make_file("omit4.ini", """\ + self.make_file( + "omit4.ini", + """\ [report] omit = m2.py - """) + """, + ) cov = coverage.Coverage(config_file="omit4.ini", include=["./*"]) self.start_import_stop(cov, "main") @@ -974,7 +1057,9 @@ def test_omit_4(self) -> None: def test_omit_5(self) -> None: self.make_main_etc() - self.make_file("omit5.ini", """\ + self.make_file( + "omit5.ini", + """\ [report] omit = fooey @@ -983,7 +1068,8 @@ def test_omit_5(self) -> None: [html] directory = out/omit_5 - """) + """, + ) cov = coverage.Coverage(config_file="omit5.ini", include=["./*"]) self.start_import_stop(cov, "main") @@ -991,20 +1077,26 @@ def test_omit_5(self) -> None: compare_html(gold_path("html/omit_5"), "out/omit_5") def test_other(self) -> None: - self.make_file("src/here.py", """\ + self.make_file( + "src/here.py", + """\ import other if 1 < 2: h = 3 else: h = 4 - """) - self.make_file("othersrc/other.py", """\ + """, + ) + self.make_file( + "othersrc/other.py", + """\ # A file in another directory. We're checking that it ends up in the # HTML report. 
print("This is the other src!") - """) + """, + ) with change_dir("src"): sys.path.insert(0, "../othersrc") @@ -1018,21 +1110,24 @@ def test_other(self) -> None: os.rename(actual_file[0], "out/other/blah_blah_other_py.html") compare_html( - gold_path("html/other"), "out/other", + gold_path("html/other"), + "out/other", extra_scrubs=[ (r'href="z_[0-9a-z]{16}_other_', 'href="_TEST_TMPDIR_other_othersrc_'), - (r'TEST_TMPDIR\\othersrc\\other.py', 'TEST_TMPDIR/othersrc/other.py'), + (r"TEST_TMPDIR\\othersrc\\other.py", "TEST_TMPDIR/othersrc/other.py"), ], ) contains( - 'out/other/index.html', + "out/other/index.html", 'here.py', 'other_py.html">', - 'other.py', + "other.py", ) def test_partial(self) -> None: - self.make_file("partial.py", """\ + self.make_file( + "partial.py", + """\ # partial branches and excluded lines a = 2 @@ -1050,15 +1145,19 @@ def test_partial(self) -> None: if a == 16: raise ZeroDivisionError("17") - """) - self.make_file("partial.ini", """\ + """, + ) + self.make_file( + "partial.ini", + """\ [run] branch = True [report] exclude_lines = raise ZeroDivisionError - """) + """, + ) cov = coverage.Coverage(config_file="partial.ini") partial = self.start_import_stop(cov, "partial") @@ -1099,13 +1198,16 @@ def test_partial(self) -> None: ) def test_styled(self) -> None: - self.make_file("a.py", """\ + self.make_file( + "a.py", + """\ if 1 < 2: # Needed a < to look at HTML entities. a = 3 else: a = 4 - """) + """, + ) self.make_file("myfile/myextra.css", "/* Doesn't matter what's here, it gets copied. */\n") @@ -1119,10 +1221,14 @@ def test_styled(self) -> None: contains_rx( "out/styled/a_py.html", r'', - (r'if 1 ' + - r'< 2'), - (r' a = ' + - r'3'), + ( + r'if 1 ' + + r'< 2' + ), + ( + r' a = ' + + r'3' + ), r'67%', ) contains_rx( @@ -1133,7 +1239,9 @@ def test_styled(self) -> None: ) def test_multiline(self) -> None: - self.make_file("multiline.py", """\ + self.make_file( + "multiline.py", + """\ x = 0 if ( x or x @@ -1152,7 +1260,8 @@ def test_multiline(self) -> None: print( "never" ) - """) + """, + ) cov = coverage.Coverage(branch=True) multiline = self.start_import_stop(cov, "multiline") cov.html_report(multiline, directory="out/multiline") @@ -1171,14 +1280,17 @@ def test_tabbed(self) -> None: # if x: # look nice # b = "no spaces" # when they # c = "done" # line up. - self.make_file("tabbed.py", """\ + self.make_file( + "tabbed.py", + """\ x = 1 if x: \ta = "Tabbed"\t\t\t\t# Aligned comments \tif x:\t\t\t\t\t# look nice \t\tb = "No spaces"\t\t\t# when they \tc = "Done"\t\t\t\t# line up. 
- """) + """, + ) cov = coverage.Coverage() tabbed = self.start_import_stop(cov, "tabbed") @@ -1189,21 +1301,24 @@ def test_tabbed(self) -> None: contains( "out/tabbed_py.html", - '> if ' + - 'x:' + - ' ' + - '# look nice', + '> if ' + + 'x:' + + " " + + '# look nice', ) doesnt_contain("out/tabbed_py.html", "\t") def test_bug_1828(self) -> None: # https://github.com/nedbat/coveragepy/pull/1828 - self.make_file("backslashes.py", """\ + self.make_file( + "backslashes.py", + """\ a = ["aaa",\\ "bbb \\ ccc"] - """) + """, + ) cov = coverage.Coverage() backslashes = self.start_import_stop(cov, "backslashes") @@ -1213,25 +1328,28 @@ def test_bug_1828(self) -> None: "htmlcov/backslashes_py.html", # line 2 is `"bbb \` r'2' - + r' "bbb \', + + r' "bbb \', # line 3 is `ccc"]` r'3' - + r' ccc"]', + + r' ccc"]', ) assert self.get_html_report_text_lines("backslashes.py") == [ '1a = ["aaa",\\', '2 "bbb \\', '3 ccc"]', - ] + ] @pytest.mark.parametrize( - "leader", ["", "f", "r", "fr", "rf"], - ids=["string", "f-string", "raw_string", "f-raw_string", "raw_f-string"] + "leader", + ["", "f", "r", "fr", "rf"], + ids=["string", "f-string", "raw_string", "f-raw_string", "raw_f-string"], ) def test_bug_1836(self, leader: str) -> None: # https://github.com/nedbat/coveragepy/issues/1836 - self.make_file("py312_fstrings.py", f"""\ + self.make_file( + "py312_fstrings.py", + f"""\ prog_name = 'bug.py' err_msg = {leader}'''\\ {{prog_name}}: ERROR: This is the first line of the error. @@ -1239,7 +1357,8 @@ def test_bug_1836(self, leader: str) -> None: \\ {{prog_name}}: ERROR: This is the third line of the error. ''' - """) + """, + ) cov = coverage.Coverage() py312_fstrings = self.start_import_stop(cov, "py312_fstrings") @@ -1253,13 +1372,16 @@ def test_bug_1836(self, leader: str) -> None: "5" + "\\", "6" + "{prog_name}: ERROR: This is the third line of the error.", "7" + "'''", - ] + ] def test_bug_1980(self) -> None: - self.make_file("fstring_middle.py", """\ + self.make_file( + "fstring_middle.py", + """\ x = 1 f'Look: {x} {{x}}!' - """) + """, + ) cov = coverage.Coverage() the_mod = self.start_import_stop(cov, "fstring_middle") @@ -1268,18 +1390,21 @@ def test_bug_1980(self) -> None: assert self.get_html_report_text_lines("fstring_middle.py") == [ "1" + "x = 1", "2" + "f'Look: {x} {{x}}!'", - ] + ] def test_unicode(self) -> None: surrogate = "\U000e0100" - self.make_file("unicode.py", """\ + self.make_file( + "unicode.py", + """\ # -*- coding: utf-8 -*- # A Python source file with exotic characters. 
upside_down = "ʎd˙ĮbɐɚĮĘŒoɔ" surrogate = "db40,dd00: x@" - """.replace("@", surrogate)) + """.replace("@", surrogate), + ) cov = coverage.Coverage() unimod = self.start_import_stop(cov, "unicode") @@ -1375,11 +1500,12 @@ def test_dynamic_contexts(self) -> None: cov.set_option("html:show_contexts", True) mod = self.start_import_stop(cov, "two_tests") d = self.html_data_from_cov(cov, mod) - context_labels = [self.EMPTY, 'two_tests.test_one', 'two_tests.test_two'] + context_labels = [self.EMPTY, "two_tests.test_one", "two_tests.test_two"] expected_lines = [self.OUTER_LINES, self.TEST_ONE_LINES, self.TEST_TWO_LINES] for label, expected in zip(context_labels, expected_lines): actual = [ - ld.number for ld in d.lines + ld.number + for ld in d.lines if label == ld.contexts_label or label in (ld.contexts or ()) ] assert sorted(expected) == sorted(actual) @@ -1396,7 +1522,7 @@ def test_filtered_dynamic_contexts(self) -> None: mod = self.start_import_stop(cov, "two_tests") d = self.html_data_from_cov(cov, mod) - context_labels = [self.EMPTY, 'two_tests.test_one', 'two_tests.test_two'] + context_labels = [self.EMPTY, "two_tests.test_one", "two_tests.test_two"] expected_lines: list[list[TLineNo]] = [[], self.TEST_ONE_LINES, []] for label, expected in zip(context_labels, expected_lines): actual = [ld.number for ld in d.lines if label in (ld.contexts or ())] @@ -1419,11 +1545,12 @@ def test_dynamic_contexts_relative_files(self) -> None: cov.set_option("html:show_contexts", True) mod = self.start_import_stop(cov, "two_tests") d = self.html_data_from_cov(cov, mod) - context_labels = [self.EMPTY, 'two_tests.test_one', 'two_tests.test_two'] + context_labels = [self.EMPTY, "two_tests.test_one", "two_tests.test_two"] expected_lines = [self.OUTER_LINES, self.TEST_ONE_LINES, self.TEST_TWO_LINES] for label, expected in zip(context_labels, expected_lines): actual = [ - ld.number for ld in d.lines + ld.number + for ld in d.lines if label == ld.contexts_label or label in (ld.contexts or ()) ] assert sorted(expected) == sorted(actual) @@ -1447,10 +1574,13 @@ def test_bad_anchor(self) -> None: self.assert_valid_hrefs() -@pytest.mark.parametrize("n, key", [ - (0, "a"), - (1, "b"), - (999999999, "e9S_p"), -]) +@pytest.mark.parametrize( + "n, key", + [ + (0, "a"), + (1, "b"), + (999999999, "e9S_p"), + ], +) def test_encode_int(n: int, key: str) -> None: assert coverage.html.encode_int(n) == key diff --git a/tests/test_json.py b/tests/test_json.py index aeeb81c35..fc43472dd 100644 --- a/tests/test_json.py +++ b/tests/test_json.py @@ -29,7 +29,9 @@ def _assert_expected_json_report( """ Helper that creates an example file for most tests. """ - self.make_file("a.py", """\ + self.make_file( + "a.py", + """\ a = {'b': 1} if a.get('a'): b = 3 @@ -39,7 +41,8 @@ def _assert_expected_json_report( b = 7 if not a: b = 9 - """) + """, + ) self._compare_json_reports(cov, expected_result, "a") def _assert_expected_json_report_with_regions( @@ -50,7 +53,9 @@ def _assert_expected_json_report_with_regions( """ Helper that creates an example file for regions tests. 
""" - self.make_file("b.py", """\ + self.make_file( + "b.py", + """\ a = {"b": 1} def c(): @@ -66,7 +71,8 @@ def e(self): return 13 def f(self): return 15 - """) + """, + ) self._compare_json_reports(cov, expected_result, "b") def _compare_json_reports( @@ -86,106 +92,108 @@ def _compare_json_reports( with open(output_path, encoding="utf-8") as result_file: parsed_result = json.load(result_file) self.assert_recent_datetime( - datetime.strptime(parsed_result['meta']['timestamp'], "%Y-%m-%dT%H:%M:%S.%f"), + datetime.strptime(parsed_result["meta"]["timestamp"], "%Y-%m-%dT%H:%M:%S.%f"), + ) + del parsed_result["meta"]["timestamp"] + expected_result["meta"].update( + { + "version": coverage.__version__, + } ) - del (parsed_result['meta']['timestamp']) - expected_result["meta"].update({ - "version": coverage.__version__, - }) assert parsed_result == expected_result def test_branch_coverage(self) -> None: cov = coverage.Coverage(branch=True) a_py_result = { - 'executed_lines': [1, 2, 4, 5, 8], - 'missing_lines': [3, 7, 9], - 'excluded_lines': [], - 'executed_branches': [ + "executed_lines": [1, 2, 4, 5, 8], + "missing_lines": [3, 7, 9], + "excluded_lines": [], + "executed_branches": [ [2, 4], [4, 5], [8, -1], ], - 'missing_branches': [ + "missing_branches": [ [2, 3], [4, 7], [8, 9], ], - 'summary': { - 'missing_lines': 3, - 'covered_lines': 5, - 'num_statements': 8, - 'num_branches': 6, - 'excluded_lines': 0, - 'num_partial_branches': 3, - 'covered_branches': 3, - 'missing_branches': 3, - 'percent_covered': 57.142857142857146, - 'percent_covered_display': '57', + "summary": { + "missing_lines": 3, + "covered_lines": 5, + "num_statements": 8, + "num_branches": 6, + "excluded_lines": 0, + "num_partial_branches": 3, + "covered_branches": 3, + "missing_branches": 3, + "percent_covered": 57.142857142857146, + "percent_covered_display": "57", }, } expected_result = { - 'meta': { + "meta": { "branch_coverage": True, "format": 3, "show_contexts": False, }, - 'files': { - 'a.py': copy.deepcopy(a_py_result), + "files": { + "a.py": copy.deepcopy(a_py_result), }, - 'totals': { - 'missing_lines': 3, - 'covered_lines': 5, - 'num_statements': 8, - 'num_branches': 6, - 'excluded_lines': 0, - 'num_partial_branches': 3, - 'percent_covered': 57.142857142857146, - 'percent_covered_display': '57', - 'covered_branches': 3, - 'missing_branches': 3, + "totals": { + "missing_lines": 3, + "covered_lines": 5, + "num_statements": 8, + "num_branches": 6, + "excluded_lines": 0, + "num_partial_branches": 3, + "percent_covered": 57.142857142857146, + "percent_covered_display": "57", + "covered_branches": 3, + "missing_branches": 3, }, } # With regions, a lot of data is duplicated. 
- expected_result["files"]["a.py"]["classes"] = {"": a_py_result} # type: ignore[index] - expected_result["files"]["a.py"]["functions"] = {"": a_py_result} # type: ignore[index] + expected_result["files"]["a.py"]["classes"] = {"": a_py_result} # type: ignore[index] + expected_result["files"]["a.py"]["functions"] = {"": a_py_result} # type: ignore[index] self._assert_expected_json_report(cov, expected_result) def test_simple_line_coverage(self) -> None: cov = coverage.Coverage() a_py_result = { - 'executed_lines': [1, 2, 4, 5, 8], - 'missing_lines': [3, 7, 9], - 'excluded_lines': [], - 'summary': { - 'excluded_lines': 0, - 'missing_lines': 3, - 'covered_lines': 5, - 'num_statements': 8, - 'percent_covered': 62.5, - 'percent_covered_display': '62', + "executed_lines": [1, 2, 4, 5, 8], + "missing_lines": [3, 7, 9], + "excluded_lines": [], + "summary": { + "excluded_lines": 0, + "missing_lines": 3, + "covered_lines": 5, + "num_statements": 8, + "percent_covered": 62.5, + "percent_covered_display": "62", }, } expected_result = { - 'meta': { + "meta": { "branch_coverage": False, "format": 3, "show_contexts": False, }, - 'files': { - 'a.py': copy.deepcopy(a_py_result), + "files": { + "a.py": copy.deepcopy(a_py_result), }, - 'totals': { - 'excluded_lines': 0, - 'missing_lines': 3, - 'covered_lines': 5, - 'num_statements': 8, - 'percent_covered': 62.5, - 'percent_covered_display': '62', + "totals": { + "excluded_lines": 0, + "missing_lines": 3, + "covered_lines": 5, + "num_statements": 8, + "percent_covered": 62.5, + "percent_covered_display": "62", }, } # With regions, a lot of data is duplicated. - expected_result["files"]["a.py"]["classes"] = {"": a_py_result} # type: ignore[index] - expected_result["files"]["a.py"]["functions"] = {"": a_py_result} # type: ignore[index] + expected_result["files"]["a.py"]["classes"] = {"": a_py_result} # type: ignore[index] + expected_result["files"]["a.py"]["functions"] = {"": a_py_result} # type: ignore[index] self._assert_expected_json_report(cov, expected_result) def test_regions_coverage(self) -> None: @@ -500,7 +508,9 @@ def test_branch_regions_coverage(self) -> None: def run_context_test(self, relative_files: bool) -> None: """A helper for two tests below.""" - self.make_file("config", f"""\ + self.make_file( + "config", + f"""\ [run] relative_files = {relative_files} @@ -509,7 +519,8 @@ def run_context_test(self, relative_files: bool) -> None: [json] show_contexts = True - """) + """, + ) cov = coverage.Coverage(context="cool_test", config_file="config") a_py_result = { "executed_lines": [1, 2, 4, 5, 8], @@ -550,8 +561,8 @@ def run_context_test(self, relative_files: bool) -> None: }, } # With regions, a lot of data is duplicated. - expected_result["files"]["a.py"]["classes"] = {"": a_py_result} # type: ignore[index] - expected_result["files"]["a.py"]["functions"] = {"": a_py_result} # type: ignore[index] + expected_result["files"]["a.py"]["classes"] = {"": a_py_result} # type: ignore[index] + expected_result["files"]["a.py"]["functions"] = {"": a_py_result} # type: ignore[index] self._assert_expected_json_report(cov, expected_result) def test_context_non_relative(self) -> None: @@ -564,7 +575,9 @@ def test_l1_equals_l2(self) -> None: # In results.py, we had a line checking `if l1 == l2` that was never # true. This test makes it true. The annotations are essential, I # don't know why. 
- self.make_file("wtf.py", """\ + self.make_file( + "wtf.py", + """\ def function( x: int, y: int, @@ -572,7 +585,8 @@ def function( return x + y assert function(3, 5) == 8 - """) + """, + ) cov = coverage.Coverage(branch=True) mod = self.start_import_stop(cov, "wtf") cov.json_report(mod) diff --git a/tests/test_lcov.py b/tests/test_lcov.py index f4dff3801..6472376e5 100644 --- a/tests/test_lcov.py +++ b/tests/test_lcov.py @@ -21,15 +21,20 @@ def create_initial_files(self) -> None: Helper for tests that handles the common ceremony so the tests can show the consequences of changes in the setup. """ - self.make_file("main_file.py", """\ + self.make_file( + "main_file.py", + """\ def cuboid_volume(l): return (l*l*l) def IsItTrue(): return True - """) + """, + ) - self.make_file("test_file.py", """\ + self.make_file( + "test_file.py", + """\ from main_file import cuboid_volume import unittest @@ -39,7 +44,8 @@ def test_volume(self): self.assertAlmostEqual(cuboid_volume(1),1) self.assertAlmostEqual(cuboid_volume(0),0) self.assertAlmostEqual(cuboid_volume(5.5),166.375) - """) + """, + ) def get_lcov_report_content(self, filename: str = "coverage.lcov") -> str: """Return the content of an LCOV report.""" @@ -49,13 +55,16 @@ def get_lcov_report_content(self, filename: str = "coverage.lcov") -> str: def test_lone_file(self) -> None: # For a single file with a couple of functions, the lcov should cover # the function definitions themselves, but not the returns. - self.make_file("main_file.py", """\ + self.make_file( + "main_file.py", + """\ def cuboid_volume(l): return (l*l*l) def IsItTrue(): return True - """) + """, + ) expected_result = textwrap.dedent("""\ SF:main_file.py DA:1,1 @@ -81,13 +90,16 @@ def IsItTrue(): assert expected_result == actual_result def test_line_checksums(self) -> None: - self.make_file("main_file.py", """\ + self.make_file( + "main_file.py", + """\ def cuboid_volume(l): return (l*l*l) def IsItTrue(): return True - """) + """, + ) self.make_file(".coveragerc", "[lcov]\nline_checksums = true\n") self.assert_doesnt_exist(".coverage") cov = coverage.Coverage(source=["."]) @@ -161,13 +173,16 @@ def test_simple_line_coverage_two_files(self) -> None: def test_branch_coverage_one_file(self) -> None: # Test that the reporter produces valid branch coverage. - self.make_file("main_file.py", """\ + self.make_file( + "main_file.py", + """\ def is_it_x(x): if x == 3: return x else: return False - """) + """, + ) self.assert_doesnt_exist(".coverage") cov = coverage.Coverage(branch=True, source=".") self.start_import_stop(cov, "main_file") @@ -198,15 +213,20 @@ def is_it_x(x): def test_branch_coverage_two_files(self) -> None: # Test that valid branch coverage is generated # in the case of two files. - self.make_file("main_file.py", """\ + self.make_file( + "main_file.py", + """\ def is_it_x(x): if x == 3: return x else: return False - """) + """, + ) - self.make_file("test_file.py", """\ + self.make_file( + "test_file.py", + """\ from main_file import * import unittest @@ -214,7 +234,8 @@ class TestIsItX(unittest.TestCase): def test_is_it_x(self): self.assertEqual(is_it_x(3), 3) self.assertEqual(is_it_x(4), False) - """) + """, + ) self.assert_doesnt_exist(".coverage") cov = coverage.Coverage(branch=True, source=".") self.start_import_stop(cov, "test_file") @@ -259,14 +280,17 @@ def test_is_it_x(self): def test_half_covered_branch(self) -> None: # Test that for a given branch that is only half covered, # the block numbers remain the same, and produces valid lcov. 
- self.make_file("main_file.py", """\ + self.make_file( + "main_file.py", + """\ something = True if something: print("Yes, something") else: print("No, nothing") - """) + """, + ) self.assert_doesnt_exist(".coverage") cov = coverage.Coverage(branch=True, source=".") self.start_import_stop(cov, "main_file") @@ -324,11 +348,16 @@ def test_empty_init_file_skipped(self) -> None: assert expected_result == actual_result def test_excluded_lines(self) -> None: - self.make_file(".coveragerc", """\ + self.make_file( + ".coveragerc", + """\ [report] exclude_lines = foo - """) - self.make_file("runme.py", """\ + """, + ) + self.make_file( + "runme.py", + """\ s = "Hello 1" t = "foo is ignored 2" if s.upper() == "BYE 3": @@ -337,7 +366,8 @@ def test_excluded_lines(self) -> None: print("Done 6") # foo 7 # line 8 - """) + """, + ) cov = coverage.Coverage(source=".", branch=True) self.start_import_stop(cov, "runme") cov.lcov_report() @@ -359,7 +389,9 @@ def test_excluded_lines(self) -> None: assert expected_result == actual_result def test_exit_branches(self) -> None: - self.make_file("runme.py", """\ + self.make_file( + "runme.py", + """\ def foo(a): if a: print(f"{a!r} is truthy") @@ -367,7 +399,8 @@ def foo(a): foo(False) foo([]) foo([0]) - """) + """, + ) cov = coverage.Coverage(source=".", branch=True) self.start_import_stop(cov, "runme") cov.lcov_report() @@ -396,7 +429,9 @@ def foo(a): assert expected_result == actual_result def test_genexpr_exit_arcs_pruned_full_coverage(self) -> None: - self.make_file("runme.py", """\ + self.make_file( + "runme.py", + """\ def foo(a): if any(x > 0 for x in a): print(f"{a!r} has positives") @@ -404,7 +439,8 @@ def foo(a): foo([0]) foo([0,1]) foo([0,-1]) - """) + """, + ) cov = coverage.Coverage(source=".", branch=True) self.start_import_stop(cov, "runme") cov.lcov_report() @@ -433,13 +469,16 @@ def foo(a): assert expected_result == actual_result def test_genexpr_exit_arcs_pruned_never_true(self) -> None: - self.make_file("runme.py", """\ + self.make_file( + "runme.py", + """\ def foo(a): if any(x > 0 for x in a): print(f"{a!r} has positives") foo([]) foo([0]) - """) + """, + ) cov = coverage.Coverage(source=".", branch=True) self.start_import_stop(cov, "runme") cov.lcov_report() @@ -466,13 +505,16 @@ def foo(a): assert expected_result == actual_result def test_genexpr_exit_arcs_pruned_always_true(self) -> None: - self.make_file("runme.py", """\ + self.make_file( + "runme.py", + """\ def foo(a): if any(x > 0 for x in a): print(f"{a!r} has positives") foo([1]) foo([1,2]) - """) + """, + ) cov = coverage.Coverage(source=".", branch=True) self.start_import_stop(cov, "runme") cov.lcov_report() @@ -499,11 +541,14 @@ def foo(a): assert expected_result == actual_result def test_genexpr_exit_arcs_pruned_not_reached(self) -> None: - self.make_file("runme.py", """\ + self.make_file( + "runme.py", + """\ def foo(a): if any(x > 0 for x in a): print(f"{a!r} has positives") - """) + """, + ) cov = coverage.Coverage(source=".", branch=True) self.start_import_stop(cov, "runme") cov.lcov_report() @@ -528,7 +573,9 @@ def foo(a): assert expected_result == actual_result def test_always_raise(self) -> None: - self.make_file("always_raise.py", """\ + self.make_file( + "always_raise.py", + """\ try: if not_defined: print("Yes") @@ -536,7 +583,8 @@ def test_always_raise(self) -> None: print("No") except Exception: pass - """) + """, + ) cov = coverage.Coverage(source=".", branch=True) self.start_import_stop(cov, "always_raise") cov.lcov_report() @@ -560,13 +608,16 @@ def 
test_always_raise(self) -> None: assert expected_result == actual_result def test_multiline_conditions(self) -> None: - self.make_file("multi.py", """\ + self.make_file( + "multi.py", + """\ def fun(x): if ( x ): print("got here") - """) + """, + ) cov = coverage.Coverage(source=".", branch=True) self.start_import_stop(cov, "multi") cov.lcov_report() @@ -574,14 +625,17 @@ def fun(x): assert "BRDA:2,0,return from function 'fun',-" in lcov def test_module_exit(self) -> None: - self.make_file("modexit.py", """\ + self.make_file( + "modexit.py", + """\ #! /usr/bin/env python def foo(): return bar( ) if "x" == "y": # line 5 foo() - """) + """, + ) cov = coverage.Coverage(source=".", branch=True) self.start_import_stop(cov, "modexit") cov.lcov_report() diff --git a/tests/test_misc.py b/tests/test_misc.py index 5e674094d..0164071f6 100644 --- a/tests/test_misc.py +++ b/tests/test_misc.py @@ -49,16 +49,16 @@ def test_unicode_hashing(self) -> None: def test_dict_hashing(self) -> None: h1 = Hasher() - h1.update({'a': 17, 'b': 23}) + h1.update({"a": 17, "b": 23}) h2 = Hasher() - h2.update({'b': 23, 'a': 17}) + h2.update({"b": 23, "a": 17}) assert h1.hexdigest() == h2.hexdigest() def test_dict_collision(self) -> None: h1 = Hasher() - h1.update({'a': 17, 'b': {'c': 1, 'd': 2}}) + h1.update({"a": 17, "b": {"c": 1, "d": 2}}) h2 = Hasher() - h2.update({'a': 17, 'b': {'c': 1}, 'd': 2}) + h2.update({"a": 17, "b": {"c": 1}, "d": 2}) assert h1.hexdigest() != h2.hexdigest() @@ -83,28 +83,36 @@ def test_actual_errors(self) -> None: VARS = { - 'FOO': 'fooey', - 'BAR': 'xyzzy', + "FOO": "fooey", + "BAR": "xyzzy", } -@pytest.mark.parametrize("before, after", [ - ("Nothing to do", "Nothing to do"), - ("Dollar: $$", "Dollar: $"), - ("Simple: $FOO is fooey", "Simple: fooey is fooey"), - ("Braced: X${FOO}X.", "Braced: XfooeyX."), - ("Missing: x${NOTHING}y is xy", "Missing: xy is xy"), - ("Multiple: $$ $FOO $BAR ${FOO}", "Multiple: $ fooey xyzzy fooey"), - ("Ill-formed: ${%5} ${{HI}} ${", "Ill-formed: ${%5} ${{HI}} ${"), - ("Strict: ${FOO?} is there", "Strict: fooey is there"), - ("Defaulted: ${WUT-missing}!", "Defaulted: missing!"), - ("Defaulted empty: ${WUT-}!", "Defaulted empty: !"), -]) + +@pytest.mark.parametrize( + "before, after", + [ + ("Nothing to do", "Nothing to do"), + ("Dollar: $$", "Dollar: $"), + ("Simple: $FOO is fooey", "Simple: fooey is fooey"), + ("Braced: X${FOO}X.", "Braced: XfooeyX."), + ("Missing: x${NOTHING}y is xy", "Missing: xy is xy"), + ("Multiple: $$ $FOO $BAR ${FOO}", "Multiple: $ fooey xyzzy fooey"), + ("Ill-formed: ${%5} ${{HI}} ${", "Ill-formed: ${%5} ${{HI}} ${"), + ("Strict: ${FOO?} is there", "Strict: fooey is there"), + ("Defaulted: ${WUT-missing}!", "Defaulted: missing!"), + ("Defaulted empty: ${WUT-}!", "Defaulted empty: !"), + ], +) def test_substitute_variables(before: str, after: str) -> None: assert substitute_variables(before, VARS) == after -@pytest.mark.parametrize("text", [ - "Strict: ${NOTHING?} is an error", -]) + +@pytest.mark.parametrize( + "text", + [ + "Strict: ${NOTHING?} is an error", + ], +) def test_substitute_variables_errors(text: str) -> None: with pytest.raises(CoverageException) as exc_info: substitute_variables(text, VARS) @@ -142,10 +150,12 @@ def test_failure(self) -> None: ("4.0 3.10-win 3.10-mac 3.9-mac 3.9-win", "3.9-mac 3.9-win 3.10-mac 3.10-win 4.0"), ] + @pytest.mark.parametrize("words, ordered", HUMAN_DATA) def test_human_sorted(words: str, ordered: str) -> None: assert " ".join(human_sorted(words.split())) == ordered + 
@pytest.mark.parametrize("words, ordered", HUMAN_DATA) def test_human_sorted_items(words: str, ordered: str) -> None: keys = words.split() @@ -159,7 +169,7 @@ def test_human_sorted_items(words: str, ordered: str) -> None: def test_stdout_link_tty() -> None: - with mock.patch.object(sys.stdout, "isatty", lambda:True): + with mock.patch.object(sys.stdout, "isatty", lambda: True): link = stdout_link("some text", "some url") assert link == "\033]8;;some url\asome text\033]8;;\a" diff --git a/tests/test_mixins.py b/tests/test_mixins.py index cb49ab924..d2f02c423 100644 --- a/tests/test_mixins.py +++ b/tests/test_mixins.py @@ -18,7 +18,7 @@ class TempDirMixinTest(TempDirMixin): def file_text(self, fname: str) -> str: """Return the text read from a file.""" with open(fname, "rb") as f: - return f.read().decode('ascii') + return f.read().decode("ascii") def test_make_file(self) -> None: # A simple file. @@ -34,10 +34,13 @@ def test_make_file(self) -> None: self.make_file("sub/deeper/evenmore/third.txt") assert self.file_text("sub/deeper/evenmore/third.txt") == "" # Dedenting - self.make_file("dedented.txt", """\ + self.make_file( + "dedented.txt", + """\ Hello Bye - """) + """, + ) assert self.file_text("dedented.txt") == "Hello\nBye\n" def test_make_file_newline(self) -> None: @@ -67,7 +70,8 @@ class RestoreModulessMixinTest(TempDirMixin, RestoreModulesMixin): @pytest.mark.parametrize("val", [17, 42]) def test_module_independence(self, val: int) -> None: self.make_file("xyzzy.py", f"A = {val}") - import xyzzy # pylint: disable=import-error + import xyzzy # pylint: disable=import-error + assert xyzzy.A == val def test_cleanup_and_reimport(self) -> None: diff --git a/tests/test_numbits.py b/tests/test_numbits.py index 55a577475..9132f5550 100644 --- a/tests/test_numbits.py +++ b/tests/test_numbits.py @@ -15,8 +15,13 @@ from coverage import env from coverage.numbits import ( - nums_to_numbits, numbits_to_nums, numbits_union, numbits_intersection, - numbits_any_intersection, num_in_numbits, register_sqlite_functions, + nums_to_numbits, + numbits_to_nums, + numbits_union, + numbits_intersection, + numbits_any_intersection, + num_in_numbits, + register_sqlite_functions, ) from tests.coveragetest import CoverageTest @@ -28,7 +33,7 @@ # When coverage-testing ourselves, hypothesis complains about a test being # flaky because the first run exceeds the deadline (and fails), and the second # run succeeds. Disable the deadline if we are coverage-testing. 
-default_settings = settings(deadline=400) # milliseconds +default_settings = settings(deadline=400) # milliseconds if env.METACOV: default_settings = settings(default_settings, deadline=None) @@ -110,33 +115,52 @@ def setUp(self) -> None: self.cursor.execute("create table data (id int, numbits blob)") self.cursor.executemany( "insert into data (id, numbits) values (?, ?)", - [ - (i, nums_to_numbits(range(i, 100, i))) - for i in range(1, 11) - ], + [(i, nums_to_numbits(range(i, 100, i))) for i in range(1, 11)], ) self.addCleanup(self.cursor.close) def test_numbits_union(self) -> None: res = self.cursor.execute( - "select numbits_union(" + - "(select numbits from data where id = 7)," + - "(select numbits from data where id = 9)" + - ")", + "select numbits_union(" + + "(select numbits from data where id = 7)," + + "(select numbits from data where id = 9)" + + ")", ) expected = [ - 7, 9, 14, 18, 21, 27, 28, 35, 36, 42, 45, 49, - 54, 56, 63, 70, 72, 77, 81, 84, 90, 91, 98, 99, + 7, + 9, + 14, + 18, + 21, + 27, + 28, + 35, + 36, + 42, + 45, + 49, + 54, + 56, + 63, + 70, + 72, + 77, + 81, + 84, + 90, + 91, + 98, + 99, ] answer = numbits_to_nums(list(res)[0][0]) assert expected == answer def test_numbits_intersection(self) -> None: res = self.cursor.execute( - "select numbits_intersection(" + - "(select numbits from data where id = 7)," + - "(select numbits from data where id = 9)" + - ")", + "select numbits_intersection(" + + "(select numbits from data where id = 7)," + + "(select numbits from data where id = 9)" + + ")", ) answer = numbits_to_nums(list(res)[0][0]) assert [63] == answer diff --git a/tests/test_oddball.py b/tests/test_oddball.py index d7b592d24..b91c47cc1 100644 --- a/tests/test_oddball.py +++ b/tests/test_oddball.py @@ -27,7 +27,8 @@ class ThreadingTest(CoverageTest): """Tests of the threading support.""" def test_threading(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ import threading def fromMainThread(): @@ -49,7 +50,8 @@ def neverCalled(): ) def test_thread_run(self) -> None: - self.check_coverage("""\ + self.check_coverage( + """\ import threading class TestThread(threading.Thread): @@ -75,7 +77,8 @@ class RecursionTest(CoverageTest): def test_short_recursion(self) -> None: # We can definitely get close to 500 stack frames. - self.check_coverage("""\ + self.check_coverage( + """\ def recur(n): if n == 0: return 0 @@ -94,7 +97,8 @@ def test_long_recursion(self) -> None: # We can't finish a very deep recursion, but we don't crash. with pytest.raises(RuntimeError): with swallow_warnings("Trace function changed, data is likely wrong: None"): - self.check_coverage("""\ + self.check_coverage( + """\ def recur(n): if n == 0: return 0 @@ -116,7 +120,9 @@ def test_long_recursion_recovery(self) -> None: # the C trace function, only line 3 will be missing, and all else # will be traced. 
- self.make_file("recur.py", """\ + self.make_file( + "recur.py", + """\ import sys #; sys.setrecursionlimit(70) def recur(n): if n == 0: @@ -129,7 +135,8 @@ def recur(n): except RuntimeError: i = 11 i = 12 - """) + """, + ) cov = coverage.Coverage() with swallow_warnings("Trace function changed, data is likely wrong: None"): @@ -138,7 +145,7 @@ def recur(n): assert cov._collector is not None pytrace = (cov._collector.tracer_name() == "PyTracer") # fmt: skip expected_missing = [4] - if pytrace: # pragma: no metacov + if pytrace: # pragma: no metacov expected_missing += [10, 11, 12] _, statements, missing, _ = cov.analysis("recur.py") @@ -146,12 +153,12 @@ def recur(n): assert expected_missing == missing # Get a warning about the stackoverflow effect on the tracing function. - if pytrace and not env.METACOV: # pragma: no metacov + if pytrace and not env.METACOV: # pragma: no metacov assert len(cov._warnings) == 1 assert re.fullmatch( - r"Trace function changed, data is likely wrong: None != " + - r">", + r"Trace function changed, data is likely wrong: None != " + + r">", cov._warnings[0], ) else: @@ -167,14 +174,18 @@ class MemoryLeakTest(CoverageTest): It may still fail occasionally, especially on PyPy. """ + @pytest.mark.flaky @pytest.mark.skipif(not testenv.C_TRACER, reason="Only the C tracer has refcounting issues") def test_for_leaks(self) -> None: # Our original bad memory leak only happened on line numbers > 255, so # make a code object with more lines than that. Ugly string mumbo # jumbo to get 300 blank lines at the beginning.. - code = """\ - # blank line\n""" * 300 + """\ + code = ( + """\ + # blank line\n""" + * 300 + + """\ def once(x): # line 301 if x % 100 == 0: raise Exception("100!") @@ -190,8 +201,9 @@ def once(x): # line 301 pass i += 1 # line 315 """ + ) lines = list(range(301, 315)) - lines.remove(306) # Line 306 is the "else". + lines.remove(306) # Line 306 is the "else". # This is a non-deterministic test, so try it a few times, and fail it # only if it predominantly fails. @@ -206,10 +218,10 @@ def once(x): # line 301 # running it 10 times. 
ram_growth = (ram_10k - ram_10) - (ram_10 - ram_0) if ram_growth > 100000: - fails += 1 # pragma: only failure + fails += 1 # pragma: only failure if fails > 8: - pytest.fail("RAM grew by %d" % (ram_growth)) # pragma: only failure + pytest.fail("RAM grew by %d" % (ram_growth)) # pragma: only failure @pytest.mark.skipif( not testenv.C_TRACER, @@ -219,7 +231,7 @@ def once(x): # line 301 ) @pytest.mark.skipif( env.PYVERSION[:2] == (3, 13) and not env.GIL, - reason = "3.13t never frees code objects: https://github.com/python/cpython/pull/131989", + reason="3.13t never frees code objects: https://github.com/python/cpython/pull/131989", ) @pytest.mark.parametrize("branch", [False, True]) def test_eval_codeobject_leak(self, branch: bool) -> None: @@ -237,7 +249,7 @@ def test_eval_codeobject_leak(self, branch: bool) -> None: self.check_coverage(code, lines=[1, 2, 3], missing="", branch=branch) now = osinfo.process_ram() deltas.append(now - base) - print(f"Mem delta: {(now - base)//1024}") + print(f"Mem delta: {(now - base) // 1024}") base = now assert any(d < 50 * 1024 for d in deltas) @@ -246,21 +258,26 @@ class MemoryFumblingTest(CoverageTest): """Test that we properly manage the None refcount.""" @pytest.mark.skipif(not testenv.C_TRACER, reason="Only the C tracer has refcounting issues") - def test_dropping_none(self) -> None: # pragma: not covered + def test_dropping_none(self) -> None: # pragma: not covered # TODO: Mark this so it will only be run sometimes. pytest.skip("This is too expensive for now (30s)") # Start and stop coverage thousands of times to flush out bad # reference counting, maybe. - _ = "this is just here to put a type comment on" # type: ignore[unreachable] - self.make_file("the_code.py", """\ + _ = "this is just here to put a type comment on" # type: ignore[unreachable] + self.make_file( + "the_code.py", + """\ import random def f(): if random.random() > .5: x = 1 else: x = 2 - """) - self.make_file("main.py", """\ + """, + ) + self.make_file( + "main.py", + """\ import coverage import sys from the_code import f @@ -271,7 +288,8 @@ def f(): cov.stop() cov.erase() print("Final None refcount: %d" % (sys.getrefcount(None))) - """) + """, + ) status, out = self.run_command_status("python main.py") assert status == 0 assert "Final None refcount" in out @@ -289,7 +307,9 @@ def test_pyexpat(self) -> None: # also detect if the pyexpat bug is fixed unbeknownst to us, meaning # we'd see two RETURNs where there should only be one. - self.make_file("trydom.py", """\ + self.make_file( + "trydom.py", + """\ import xml.dom.minidom XML = '''\\ @@ -303,9 +323,10 @@ def foo(): a = 11 foo() - """) + """, + ) - self.make_file("outer.py", "\n"*100 + "import trydom\na = 102\n") + self.make_file("outer.py", "\n" * 100 + "import trydom\na = 102\n") cov = coverage.Coverage() cov.erase() @@ -344,21 +365,31 @@ def test_exception(self) -> None: # file has active lines in a different range so we'll see if the lines # get attributed to the wrong file. 
- self.make_file("oops.py", """\ + self.make_file( + "oops.py", + """\ def oops(args): a = 2 raise Exception("oops") a = 4 - """) + """, + ) - self.make_file("fly.py", "\n"*100 + """\ + self.make_file( + "fly.py", + "\n" * 100 + + """\ def fly(calls): a = 2 calls[0](calls[1:]) a = 4 - """) + """, + ) - self.make_file("catch.py", "\n"*200 + """\ + self.make_file( + "catch.py", + "\n" * 200 + + """\ def catch(calls): try: a = 3 @@ -366,15 +397,20 @@ def catch(calls): a = 5 except: a = 7 - """) + """, + ) - self.make_file("doit.py", "\n"*300 + """\ + self.make_file( + "doit.py", + "\n" * 300 + + """\ def doit(calls): try: calls[0](calls[1:]) except: a = 5 - """) + """, + ) # Import all the modules before starting coverage, so the def lines # won't be in all the results. @@ -384,32 +420,43 @@ def doit(calls): # Each run nests the functions differently to get different # combinations of catching exceptions and letting them fly. runs = [ - ("doit fly oops", { - 'doit.py': [302, 303, 304, 305], - 'fly.py': [102, 103], - 'oops.py': [2, 3], - }), - ("doit catch oops", { - 'doit.py': [302, 303], - 'catch.py': [202, 203, 204, 206, 207], - 'oops.py': [2, 3], - }), - ("doit fly catch oops", { - 'doit.py': [302, 303], - 'fly.py': [102, 103, 104], - 'catch.py': [202, 203, 204, 206, 207], - 'oops.py': [2, 3], - }), - ("doit catch fly oops", { - 'doit.py': [302, 303], - 'catch.py': [202, 203, 204, 206, 207], - 'fly.py': [102, 103], - 'oops.py': [2, 3], - }), + ( + "doit fly oops", + { + "doit.py": [302, 303, 304, 305], + "fly.py": [102, 103], + "oops.py": [2, 3], + }, + ), + ( + "doit catch oops", + { + "doit.py": [302, 303], + "catch.py": [202, 203, 204, 206, 207], + "oops.py": [2, 3], + }, + ), + ( + "doit fly catch oops", + { + "doit.py": [302, 303], + "fly.py": [102, 103, 104], + "catch.py": [202, 203, 204, 206, 207], + "oops.py": [2, 3], + }, + ), + ( + "doit catch fly oops", + { + "doit.py": [302, 303], + "catch.py": [202, 203, 204, 206, 207], + "fly.py": [102, 103], + "oops.py": [2, 3], + }, + ), ] for callnames, lines_expected in runs: - # Make the list of functions we'll call for this test. callnames_list = callnames.split() calls = [getattr(sys.modules[cn], cn) for cn in callnames_list] @@ -439,7 +486,9 @@ def test_doctest(self) -> None: # file they were in. Below, one of the doctests has four lines (1-4), # which would incorrectly claim that lines 1-4 of the file were # executed. In this file, line 2 is not executed. 
- self.make_file("the_doctest.py", '''\ + self.make_file( + "the_doctest.py", + '''\ if "x" in "abc": print("hello") def return_arg_or_void(arg): @@ -464,7 +513,8 @@ def return_arg_or_void(arg): import doctest, sys doctest.testmod(sys.modules[__name__]) # we're not __main__ :( - ''') + ''', + ) cov = coverage.Coverage() with warnings.catch_warnings(): # Doctest calls pdb which opens ~/.pdbrc without an encoding argument, @@ -479,10 +529,13 @@ def return_arg_or_void(arg): class GettraceTest(CoverageTest): """Tests that we work properly with `sys.gettrace()`.""" + def test_round_trip_in_untraced_function(self) -> None: # https://github.com/nedbat/coveragepy/issues/575 self.make_file("main.py", """import sample""") - self.make_file("sample.py", """\ + self.make_file( + "sample.py", + """\ from swap import swap_it def doit(): print(3) @@ -495,12 +548,16 @@ def doit_soon(): print(10) doit_soon() print(12) - """) - self.make_file("swap.py", """\ + """, + ) + self.make_file( + "swap.py", + """\ import sys def swap_it(): sys.settrace(sys.gettrace()) - """) + """, + ) # Use --source=sample to prevent measurement of swap.py. cov = coverage.Coverage(source=["sample"]) @@ -518,7 +575,8 @@ def test_setting_new_trace_function(self) -> None: missing = "5-7, 13-14" else: missing = "5-7" - self.check_coverage('''\ + self.check_coverage( + """\ import os.path import sys @@ -543,7 +601,7 @@ def test_unsets_trace() -> None: sys.settrace(old) a = 21 b = 22 - ''', + """, lines=[1, 2, 4, 5, 6, 7, 9, 10, 12, 13, 14, 16, 17, 18, 20, 21, 22, 23, 24], missing=missing, ) @@ -551,10 +609,10 @@ def test_unsets_trace() -> None: assert self.last_module_name is not None out = self.stdout().replace(self.last_module_name, "coverage_test") expected = ( - "call: coverage_test.py @ 12\n" + - "line: coverage_test.py @ 13\n" + - "line: coverage_test.py @ 14\n" + - "return: coverage_test.py @ 14\n" + "call: coverage_test.py @ 12\n" + + "line: coverage_test.py @ 13\n" + + "line: coverage_test.py @ 14\n" + + "return: coverage_test.py @ 14\n" ) assert expected == out @@ -563,7 +621,9 @@ def test_atexit_gettrace(self) -> None: # This is not a test of coverage at all, but of our understanding # of this edge-case behavior in various Pythons. - self.make_file("atexit_gettrace.py", """\ + self.make_file( + "atexit_gettrace.py", + """\ import atexit, sys def trace_function(frame, event, arg): @@ -578,7 +638,8 @@ def show_trace_function(): atexit.register(show_trace_function) # This will show what the trace function is at the end of the program. - """) + """, + ) status, out = self.run_command_status("python atexit_gettrace.py") assert status == 0 if env.PYPY: @@ -591,24 +652,31 @@ def show_trace_function(): class ExecTest(CoverageTest): """Tests of exec.""" + def test_correct_filename(self) -> None: # https://github.com/nedbat/coveragepy/issues/380 # Bug was that exec'd files would have their lines attributed to the # calling file. Make two files, both with ~30 lines, but no lines in # common. Line 30 in to_exec.py was recorded as line 30 in main.py, # but now it's fixed. 
:) - self.make_file("to_exec.py", """\ + self.make_file( + "to_exec.py", + """\ \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n print("var is {}".format(var)) # line 31 - """) - self.make_file("main.py", """\ + """, + ) + self.make_file( + "main.py", + """\ namespace = {'var': 17} with open("to_exec.py", encoding="utf-8") as to_exec_py: code = compile(to_exec_py.read(), 'to_exec.py', 'exec') exec(code, globals(), namespace) \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n print("done") # line 35 - """) + """, + ) cov = coverage.Coverage() self.start_import_stop(cov, "main") @@ -637,11 +705,14 @@ class MockingProtectionTest(CoverageTest): https://github.com/nedbat/coveragepy/issues/416 """ + def test_os_path_exists(self) -> None: # To see if this test still detects the problem, change isolate_module # in misc.py to simply return its argument. It should fail with a # StopIteration error. - self.make_file("bug416.py", """\ + self.make_file( + "bug416.py", + """\ import os.path from unittest import mock @@ -654,13 +725,18 @@ def test_path_exists(mock_exists): print(os.path.exists(".")) test_path_exists() - """) - self.make_file("bug416a.py", """\ + """, + ) + self.make_file( + "bug416a.py", + """\ print("bug416a.py") foo = 23 - """) + """, + ) import py_compile + py_compile.compile("bug416a.py") out = self.run_command("coverage run bug416.py") assert out == "in test\nbug416a.py\n23\n17\n" diff --git a/tests/test_parser.py b/tests/test_parser.py index 8a0051b46..07ff0287c 100644 --- a/tests/test_parser.py +++ b/tests/test_parser.py @@ -50,7 +50,13 @@ class Bar: pass """) assert parser.exit_counts() == { - 2:1, 3:1, 4:2, 5:1, 7:1, 9:1, 10:1, + 2: 1, + 3: 1, + 4: 2, + 5: 1, + 7: 1, + 9: 1, + 10: 1, } def test_try_except(self) -> None: @@ -66,7 +72,15 @@ def test_try_except(self) -> None: b = 9 """) assert parser.exit_counts() == { - 1: 1, 2:1, 3:1, 4:1, 5:1, 6:1, 7:1, 8:1, 9:1, + 1: 1, + 2: 1, + 3: 1, + 4: 1, + 5: 1, + 6: 1, + 7: 1, + 8: 1, + 9: 1, } def test_excluded_classes(self) -> None: @@ -79,7 +93,7 @@ def __init__(self): class Bar: pass """) - assert parser.exit_counts() == { 2:1, 3:1 } + assert parser.exit_counts() == {2: 1, 3: 1} def test_missing_branch_to_excluded_code(self) -> None: parser = self.parse_text("""\ @@ -89,7 +103,7 @@ def test_missing_branch_to_excluded_code(self) -> None: a = 4 b = 5 """) - assert parser.exit_counts() == { 1:1, 2:1, 5:1 } + assert parser.exit_counts() == {1: 1, 2: 1, 5: 1} parser = self.parse_text("""\ def foo(): if fooey: @@ -98,7 +112,7 @@ def foo(): a = 5 b = 6 """) - assert parser.exit_counts() == { 1:1, 2:2, 3:1, 5:1, 6:1 } + assert parser.exit_counts() == {1: 1, 2: 2, 3: 1, 5: 1, 6: 1} parser = self.parse_text("""\ def foo(): if fooey: @@ -107,21 +121,24 @@ def foo(): a = 5 b = 6 """) - assert parser.exit_counts() == { 1:1, 2:1, 3:1, 6:1 } - - @pytest.mark.parametrize("text", [ - pytest.param("0 spaces\n 2\n 1", id="bad_indent"), - pytest.param("'''", id="string_eof"), - pytest.param("$hello", id="dollar"), - # on 3.10 this passes ast.parse but fails on tokenize.generate_tokens - pytest.param( - "\r'\\\n'''", - id="leading_newline_eof", - marks=[ - pytest.mark.skipif(env.PYVERSION >= (3, 12), reason="parses fine in 3.12"), - ] - ) - ]) + assert parser.exit_counts() == {1: 1, 2: 1, 3: 1, 6: 1} + + @pytest.mark.parametrize( + "text", + [ + pytest.param("0 spaces\n 2\n 1", id="bad_indent"), + pytest.param("'''", id="string_eof"), + pytest.param("$hello", id="dollar"), + # on 3.10 this passes ast.parse but fails on 
tokenize.generate_tokens + pytest.param( + "\r'\\\n'''", + id="leading_newline_eof", + marks=[ + pytest.mark.skipif(env.PYVERSION >= (3, 12), reason="parses fine in 3.12"), + ], + ), + ], + ) def test_not_python(self, text: str) -> None: msg = r"Couldn't parse '' as Python source: ['\"].*['\"] at line \d+" with pytest.raises(NotPython, match=msg): @@ -190,8 +207,8 @@ def test_fuzzed_double_parse(self) -> None: # https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=50381 # The second parse used to raise `TypeError: 'NoneType' object is not iterable` msg = ( - r"(EOF in multi-line statement)" # before 3.12.0b1 - + r"|(unmatched ']')" # after 3.12.0b1 + r"(EOF in multi-line statement)" # before 3.12.0b1 + + r"|(unmatched ']')" # after 3.12.0b1 ) with pytest.raises(NotPython, match=msg): self.parse_text("]") @@ -227,17 +244,19 @@ class ExclusionParserTest(PythonParserTestBase): """Tests for the exclusion code in PythonParser.""" def test_simple(self) -> None: - parser = self.parse_text("""\ + parser = self.parse_text( + """\ a = 1; b = 2 if len([]): a = 4 # nocover """, ) - assert parser.statements == {1,3} + assert parser.statements == {1, 3} def test_excluding_if_suite(self) -> None: - parser = self.parse_text("""\ + parser = self.parse_text( + """\ a = 1; b = 2 if len([]): # nocover @@ -247,10 +266,11 @@ def test_excluding_if_suite(self) -> None: assert a == 1 and b == 2 """, ) - assert parser.statements == {1,7} + assert parser.statements == {1, 7} def test_excluding_if_but_not_else_suite(self) -> None: - parser = self.parse_text("""\ + parser = self.parse_text( + """\ a = 1; b = 2 if len([]): # nocover @@ -263,10 +283,11 @@ def test_excluding_if_but_not_else_suite(self) -> None: assert a == 8 and b == 9 """, ) - assert parser.statements == {1,8,9,10} + assert parser.statements == {1, 8, 9, 10} def test_excluding_else_suite(self) -> None: - parser = self.parse_text("""\ + parser = self.parse_text( + """\ a = 1; b = 2 if 1==1: @@ -279,8 +300,9 @@ def test_excluding_else_suite(self) -> None: assert a == 4 and b == 5 and c == 6 """, ) - assert parser.statements == {1,3,4,5,6,10} - parser = self.parse_text("""\ + assert parser.statements == {1, 3, 4, 5, 6, 10} + parser = self.parse_text( + """\ a = 1; b = 2 if 1==1: @@ -300,10 +322,11 @@ def test_excluding_else_suite(self) -> None: assert a == 4 and b == 5 and c == 6 """, ) - assert parser.statements == {1,3,4,5,6,17} + assert parser.statements == {1, 3, 4, 5, 6, 17} def test_excluding_oneline_if(self) -> None: - parser = self.parse_text("""\ + parser = self.parse_text( + """\ def foo(): a = 2 if len([]): x = 3 # nocover @@ -312,10 +335,11 @@ def foo(): foo() """, ) - assert parser.statements == {1,2,4,6} + assert parser.statements == {1, 2, 4, 6} def test_excluding_a_colon_not_a_suite(self) -> None: - parser = self.parse_text("""\ + parser = self.parse_text( + """\ def foo(): l = list(range(10)) a = l[:3] # nocover @@ -324,18 +348,20 @@ def foo(): foo() """, ) - assert parser.statements == {1,2,4,6} + assert parser.statements == {1, 2, 4, 6} def test_excluding_for_suite(self) -> None: - parser = self.parse_text("""\ + parser = self.parse_text( + """\ a = 0 for i in [1,2,3,4,5]: # nocover a += i assert a == 15 """, ) - assert parser.statements == {1,4} - parser = self.parse_text("""\ + assert parser.statements == {1, 4} + parser = self.parse_text( + """\ a = 0 for i in [1, 2,3,4, @@ -344,8 +370,9 @@ def test_excluding_for_suite(self) -> None: assert a == 15 """, ) - assert parser.statements == {1,6} - parser = self.parse_text("""\ + assert 
parser.statements == {1, 6} + parser = self.parse_text( + """\ a = 0 for i in [1,2,3,4,5 ]: # nocover @@ -355,10 +382,11 @@ def test_excluding_for_suite(self) -> None: assert a == 1 """, ) - assert parser.statements == {1,7} + assert parser.statements == {1, 7} def test_excluding_for_else(self) -> None: - parser = self.parse_text("""\ + parser = self.parse_text( + """\ a = 0 for i in range(5): a += i+1 @@ -368,10 +396,11 @@ def test_excluding_for_else(self) -> None: assert a == 1 """, ) - assert parser.statements == {1,2,3,4,7} + assert parser.statements == {1, 2, 3, 4, 7} def test_excluding_while(self) -> None: - parser = self.parse_text("""\ + parser = self.parse_text( + """\ a = 3; b = 0 while a*b: # nocover b += 1 @@ -379,8 +408,9 @@ def test_excluding_while(self) -> None: assert a == 3 and b == 0 """, ) - assert parser.statements == {1,5} - parser = self.parse_text("""\ + assert parser.statements == {1, 5} + parser = self.parse_text( + """\ a = 3; b = 0 while ( a*b @@ -390,10 +420,11 @@ def test_excluding_while(self) -> None: assert a == 3 and b == 0 """, ) - assert parser.statements == {1,7} + assert parser.statements == {1, 7} def test_excluding_while_else(self) -> None: - parser = self.parse_text("""\ + parser = self.parse_text( + """\ a = 3; b = 0 while a: b += 1 @@ -403,10 +434,11 @@ def test_excluding_while_else(self) -> None: assert a == 3 and b == 1 """, ) - assert parser.statements == {1,2,3,4,7} + assert parser.statements == {1, 2, 3, 4, 7} def test_excluding_try_except(self) -> None: - parser = self.parse_text("""\ + parser = self.parse_text( + """\ a = 0 try: a = 1 @@ -415,8 +447,9 @@ def test_excluding_try_except(self) -> None: assert a == 1 """, ) - assert parser.statements == {1,2,3,6} - parser = self.parse_text("""\ + assert parser.statements == {1, 2, 3, 6} + parser = self.parse_text( + """\ a = 0 try: a = 1 @@ -426,8 +459,9 @@ def test_excluding_try_except(self) -> None: assert a == 99 """, ) - assert parser.statements == {1,2,3,4,5,6,7} - parser = self.parse_text("""\ + assert parser.statements == {1, 2, 3, 4, 5, 6, 7} + parser = self.parse_text( + """\ a = 0 try: a = 1 @@ -439,11 +473,12 @@ def test_excluding_try_except(self) -> None: assert a == 123 """, ) - assert parser.statements == {1,2,3,4,7,8,9} + assert parser.statements == {1, 2, 3, 4, 7, 8, 9} def test_excluding_if_pass(self) -> None: # From a comment on the coverage.py page by Michael McNeil Forbes: - parser = self.parse_text("""\ + parser = self.parse_text( + """\ def f(): if False: # pragma: nocover pass # This line still reported as missing @@ -453,17 +488,19 @@ def f(): f() """, ) - assert parser.statements == {1,7} + assert parser.statements == {1, 7} def test_multiline_if_no_branch(self) -> None: # From https://github.com/nedbat/coveragepy/issues/754 - parser = self.parse_text("""\ + parser = self.parse_text( + """\ if (this_is_a_verylong_boolean_expression == True # pragma: no branch and another_long_expression and here_another_expression): do_something() """, ) - parser2 = self.parse_text("""\ + parser2 = self.parse_text( + """\ if this_is_a_verylong_boolean_expression == True and another_long_expression \\ and here_another_expression: # pragma: no branch do_something() @@ -474,7 +511,8 @@ def test_multiline_if_no_branch(self) -> None: assert parser.lines_matching(pragma_re) == parser2.lines_matching(pragma_re) def test_excluding_function(self) -> None: - parser = self.parse_text("""\ + parser = self.parse_text( + """\ def fn(foo): # nocover a = 1 b = 2 @@ -484,8 +522,9 @@ def fn(foo): # nocover 
assert x == 1 """, ) - assert parser.statements == {6,7} - parser = self.parse_text("""\ + assert parser.statements == {6, 7} + parser = self.parse_text( + """\ a = 0 def very_long_function_to_exclude_name(very_long_argument1, very_long_argument2): @@ -494,8 +533,9 @@ def very_long_function_to_exclude_name(very_long_argument1, """, exclude="function_to_exclude", ) - assert parser.statements == {1,5} - parser = self.parse_text("""\ + assert parser.statements == {1, 5} + parser = self.parse_text( + """\ a = 0 def very_long_function_to_exclude_name( very_long_argument1, @@ -506,8 +546,9 @@ def very_long_function_to_exclude_name( """, exclude="function_to_exclude", ) - assert parser.statements == {1,7} - parser = self.parse_text("""\ + assert parser.statements == {1, 7} + parser = self.parse_text( + """\ def my_func( super_long_input_argument_0=0, super_long_input_argument_1=1, @@ -520,7 +561,8 @@ def my_func_2(super_long_input_argument_0=0, super_long_input_argument_1=1, supe exclude="my_func", ) assert parser.statements == set() - parser = self.parse_text("""\ + parser = self.parse_text( + """\ def my_func( super_long_input_argument_0=0, super_long_input_argument_1=1, @@ -532,8 +574,9 @@ def my_func_2(super_long_input_argument_0=0, super_long_input_argument_1=1, supe """, exclude="my_func_2", ) - assert parser.statements == {1,5} - parser = self.parse_text("""\ + assert parser.statements == {1, 5} + parser = self.parse_text( + """\ def my_func ( super_long_input_argument_0=0, super_long_input_argument_1=1, @@ -545,8 +588,9 @@ def my_func_2 (super_long_input_argument_0=0, super_long_input_argument_1=1, """, exclude="my_func_2", ) - assert parser.statements == {1,5} - parser = self.parse_text("""\ + assert parser.statements == {1, 5} + parser = self.parse_text( + """\ def my_func ( super_long_input_argument_0=0, super_long_input_argument_1=1, @@ -559,7 +603,8 @@ def my_func_2 (super_long_input_argument_0=0, super_long_input_argument_1=1, exclude="my_func", ) assert parser.statements == set() - parser = self.parse_text("""\ + parser = self.parse_text( + """\ def my_func \ ( super_long_input_argument_0=0, @@ -572,8 +617,9 @@ def my_func_2(super_long_input_argument_0=0, super_long_input_argument_1=1, supe """, exclude="my_func_2", ) - assert parser.statements == {1,5} - parser = self.parse_text("""\ + assert parser.statements == {1, 5} + parser = self.parse_text( + """\ def my_func \ ( super_long_input_argument_0=0, @@ -590,7 +636,8 @@ def my_func_2(super_long_input_argument_0=0, super_long_input_argument_1=1, supe def test_excluding_bug1713(self) -> None: if env.PYVERSION >= (3, 10): - parser = self.parse_text("""\ + parser = self.parse_text( + """\ print("1") def hello_3(a): # pragma: nocover @@ -605,7 +652,8 @@ def hello_3(a): # pragma: nocover """, ) assert parser.statements == {1, 11} - parser = self.parse_text("""\ + parser = self.parse_text( + """\ print("1") def hello_3(a): # nocover @@ -619,7 +667,8 @@ def hello_3(a): # nocover """, ) assert parser.statements == {1, 10} - parser = self.parse_text("""\ + parser = self.parse_text( + """\ print(1) def func(a, b): @@ -633,7 +682,8 @@ def func(a, b): """, ) assert parser.statements == {1, 3, 10} - parser = self.parse_text("""\ + parser = self.parse_text( + """\ class Foo: # pragma: nocover def greet(self): print("hello world") @@ -642,7 +692,8 @@ def greet(self): assert parser.statements == set() def test_excluding_method(self) -> None: - parser = self.parse_text("""\ + parser = self.parse_text( + """\ class Fooey: def __init__(self): self.a 
= 1 @@ -654,8 +705,9 @@ def foo(self): # nocover assert x.a == 1 """, ) - assert parser.statements == {1,2,3,8,9} - parser = self.parse_text("""\ + assert parser.statements == {1, 2, 3, 8, 9} + parser = self.parse_text( + """\ class Fooey: def __init__(self): self.a = 1 @@ -671,10 +723,11 @@ def very_long_method_to_exclude_name( """, exclude="method_to_exclude", ) - assert parser.statements == {1,2,3,11,12} + assert parser.statements == {1, 2, 3, 11, 12} def test_excluding_class(self) -> None: - parser = self.parse_text("""\ + parser = self.parse_text( + """\ class Fooey: # nocover def __init__(self): self.a = 1 @@ -686,10 +739,11 @@ def foo(self): assert x == 1 """, ) - assert parser.statements == {8,9} + assert parser.statements == {8, 9} def test_excludes_non_ascii(self) -> None: - parser = self.parse_text("""\ + parser = self.parse_text( + """\ # coding: utf-8 a = 1; b = 2 @@ -701,7 +755,8 @@ def test_excludes_non_ascii(self) -> None: assert parser.statements == {2, 4} def test_no_exclude_at_all(self) -> None: - parser = self.parse_text("""\ + parser = self.parse_text( + """\ def foo(): if fooey: a = 3 @@ -711,11 +766,12 @@ def foo(): """, exclude="", ) - assert parser.exit_counts() == { 1:1, 2:2, 3:1, 5:1, 6:1 } + assert parser.exit_counts() == {1: 1, 2: 2, 3: 1, 5: 1, 6: 1} def test_formfeed(self) -> None: # https://github.com/nedbat/coveragepy/issues/461 - parser = self.parse_text("""\ + parser = self.parse_text( + """\ x = 1 assert len([]) == 0, ( "This won't happen %s" % ("hello",) @@ -813,10 +869,13 @@ def function() -> int: def test_multiline_exclusion_single_line(self) -> None: regex = r"print\('.*'\)" - parser = self.parse_text("""\ + parser = self.parse_text( + """\ def foo(): print('Hello, world!') - """, regex) + """, + regex, + ) assert parser.lines_matching(regex) == {2} assert parser.raw_statements == {1, 2} assert parser.statements == {1} @@ -824,23 +883,29 @@ def foo(): def test_multiline_exclusion_suite(self) -> None: # A multi-line exclusion that matches a colon line still excludes the entire block. regex = r"if T:\n\s+print\('Hello, world!'\)" - parser = self.parse_text("""\ + parser = self.parse_text( + """\ def foo(): if T: print('Hello, world!') print('This is a multiline regex test.') a = 5 - """, regex) + """, + regex, + ) assert parser.lines_matching(regex) == {2, 3} assert parser.raw_statements == {1, 2, 3, 4, 5} assert parser.statements == {1, 5} def test_multiline_exclusion_no_match(self) -> None: regex = r"nonexistent" - parser = self.parse_text("""\ + parser = self.parse_text( + """\ def foo(): print('Hello, world!') - """, regex) + """, + regex, + ) assert parser.lines_matching(regex) == set() assert parser.raw_statements == {1, 2} assert parser.statements == {1, 2} @@ -855,7 +920,8 @@ def test_multiline_exclusion_no_source(self) -> None: def test_multiline_exclusion_all_lines_must_match(self) -> None: # https://github.com/nedbat/coveragepy/issues/996 regex = r"except ValueError:\n\s*print\('false'\)" - parser = self.parse_text("""\ + parser = self.parse_text( + """\ try: a = 2 print('false') @@ -865,33 +931,41 @@ def test_multiline_exclusion_all_lines_must_match(self) -> None: print('something else') except IndexError: print('false') - """, regex) + """, + regex, + ) assert parser.lines_matching(regex) == {4, 5} assert parser.raw_statements == {1, 2, 3, 4, 5, 6, 7, 8, 9} assert parser.statements == {1, 2, 3, 6, 7, 8, 9} def test_multiline_exclusion_multiple_matches(self) -> None: regex = r"print\('.*'\)\n\s+. 
= \d" - parser = self.parse_text("""\ + parser = self.parse_text( + """\ def foo(): print('Hello, world!') a = 5 def bar(): print('Hello again!') b = 6 - """, regex) + """, + regex, + ) assert parser.lines_matching(regex) == {2, 3, 5, 6} assert parser.raw_statements == {1, 2, 3, 4, 5, 6} assert parser.statements == {1, 4} def test_multiline_exclusion_suite2(self) -> None: regex = r"print\('Hello, world!'\)\n\s+if T:" - parser = self.parse_text("""\ + parser = self.parse_text( + """\ def foo(): print('Hello, world!') if T: print('This is a test.') - """, regex) + """, + regex, + ) assert parser.lines_matching(regex) == {2, 3} assert parser.raw_statements == {1, 2, 3, 4} assert parser.statements == {1} @@ -901,12 +975,15 @@ def test_multiline_exclusion_match_all(self) -> None: r"def foo\(\):\n\s+print\('Hello, world!'\)\n" + r"\s+if T:\n\s+print\('This is a test\.'\)" ) - parser = self.parse_text("""\ + parser = self.parse_text( + """\ def foo(): print('Hello, world!') if T: print('This is a test.') - """, regex) + """, + regex, + ) assert parser.lines_matching(regex) == {1, 2, 3, 4} assert parser.raw_statements == {1, 2, 3, 4} assert parser.statements == set() @@ -914,7 +991,8 @@ def foo(): def test_multiline_exclusion_block(self) -> None: # https://github.com/nedbat/coveragepy/issues/1803 regex = "# no cover: start(?s:.)*?# no cover: stop" - parser = self.parse_text("""\ + parser = self.parse_text( + """\ a = my_function1() if debug: msg = "blah blah" @@ -926,7 +1004,9 @@ def test_multiline_exclusion_block(self) -> None: # no cover: start but_not_this() # no cover: stop - """, regex) + """, + regex, + ) assert parser.lines_matching(regex) == {4, 5, 6, 7, 9, 10, 11} assert parser.raw_statements == {1, 2, 3, 5, 6, 8, 10} assert parser.statements == {1, 2, 3, 8} @@ -935,7 +1015,8 @@ def test_multiline_exclusion_block(self) -> None: def test_multiline_exclusion_block2(self) -> None: # https://github.com/nedbat/coveragepy/issues/1797 regex = r"case _:\n\s+assert_never\(" - parser = self.parse_text("""\ + parser = self.parse_text( + """\ match something: case type_1(): logic_1() @@ -950,7 +1031,9 @@ def test_multiline_exclusion_block2(self) -> None: logic_2() case _: print("Default case") - """, regex) + """, + regex, + ) assert parser.lines_matching(regex) == {6, 7} assert parser.raw_statements == {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14} assert parser.statements == {1, 2, 3, 4, 5, 8, 9, 10, 11, 12, 13, 14} @@ -959,7 +1042,8 @@ def test_multiline_exclusion_block3(self) -> None: # https://github.com/nedbat/coveragepy/issues/1741 # This will only work if there's exactly one return statement in the rest of the function regex = r"# no cover: to return(?s:.)*?return" - parser = self.parse_text("""\ + parser = self.parse_text( + """\ def my_function(args, j): if args.command == Commands.CMD.value: return cmd_handler(j, args) @@ -969,7 +1053,9 @@ def my_function(args, j): return os.EX_USAGE print("not excluded") - """, regex) + """, + regex, + ) assert parser.lines_matching(regex) == {4, 5, 6, 7, 8} assert parser.raw_statements == {1, 2, 3, 5, 6, 8, 9} assert parser.statements == {1, 2, 3, 9} @@ -977,7 +1063,8 @@ def my_function(args, j): def test_multiline_exclusion_whole_source(self) -> None: # https://github.com/nedbat/coveragepy/issues/118 regex = r"\A(?s:.*# pragma: exclude file.*)\Z" - parser = self.parse_text("""\ + parser = self.parse_text( + """\ import coverage # pragma: exclude file def the_void() -> None: @@ -987,7 +1074,9 @@ def the_void() -> None: return print("Excluded too") - 
""", regex) + """, + regex, + ) assert parser.lines_matching(regex) == {1, 2, 3, 4, 5, 6, 7, 8, 9, 10} assert parser.raw_statements == {1, 3, 4, 5, 6, 8, 9} assert parser.statements == set() @@ -995,7 +1084,8 @@ def the_void() -> None: def test_multiline_exclusion_from_marker(self) -> None: # https://github.com/nedbat/coveragepy/issues/118 regex = r"# pragma: rest of file(?s:.)*\Z" - parser = self.parse_text("""\ + parser = self.parse_text( + """\ import coverage # pragma: rest of file def the_void() -> None: @@ -1005,7 +1095,9 @@ def the_void() -> None: return print("Excluded too") - """, regex) + """, + regex, + ) assert parser.lines_matching(regex) == {2, 3, 4, 5, 6, 7, 8, 9, 10} assert parser.raw_statements == {1, 3, 4, 5, 6, 8, 9} assert parser.statements == {1} @@ -1036,8 +1128,8 @@ def func10(): expected = "line 1 didn't jump to line 3 because the condition on line 1 was always true" assert expected == parser.missing_arc_description(1, 3) expected = ( - "line 6 didn't return from function 'func5' " + - "because the loop on line 6 didn't complete" + "line 6 didn't return from function 'func5' " + + "because the loop on line 6 didn't complete" ) assert expected == parser.missing_arc_description(6, -5) expected = "line 6 didn't jump to line 7 because the loop on line 6 never started" @@ -1045,8 +1137,7 @@ def func10(): expected = "line 11 didn't jump to line 12 because the condition on line 11 was never true" assert expected == parser.missing_arc_description(11, 12) expected = ( - "line 11 didn't jump to line 13 " + - "because the condition on line 11 was always true" + "line 11 didn't jump to line 13 " + "because the condition on line 11 was always true" ) assert expected == parser.missing_arc_description(11, 13) @@ -1060,13 +1151,11 @@ def test_missing_arc_descriptions_for_exceptions(self) -> None: print("yikes") """) expected = ( - "line 3 didn't jump to line 4 " + - "because the exception caught by line 3 didn't happen" + "line 3 didn't jump to line 4 " + "because the exception caught by line 3 didn't happen" ) assert expected == parser.missing_arc_description(3, 4) expected = ( - "line 5 didn't jump to line 6 " + - "because the exception caught by line 5 didn't happen" + "line 5 didn't jump to line 6 " + "because the exception caught by line 5 didn't happen" ) assert expected == parser.missing_arc_description(5, 6) @@ -1140,9 +1229,14 @@ def parse_file(self, filename: str) -> PythonParser: parser.parse_source() return parser - @pytest.mark.parametrize("slug, newline", [ - ("unix", "\n"), ("dos", "\r\n"), ("mac", "\r"), - ]) + @pytest.mark.parametrize( + "slug, newline", + [ + ("unix", "\n"), + ("dos", "\r\n"), + ("mac", "\r"), + ], + ) def test_line_endings(self, slug: str, newline: str) -> None: text = """\ # check some basic branch counting @@ -1156,16 +1250,19 @@ def foo(self, a): class Bar: pass """ - counts = { 2:1, 3:1, 4:2, 5:1, 7:1, 9:1, 10:1 } + counts = {2: 1, 3: 1, 4: 2, 5: 1, 7: 1, 9: 1, 10: 1} fname = slug + ".py" self.make_file(fname, text, newline=newline) parser = self.parse_file(fname) assert parser.exit_counts() == counts, f"Wrong for {fname!r}" def test_encoding(self) -> None: - self.make_file("encoded.py", """\ + self.make_file( + "encoded.py", + """\ coverage = "\xe7\xf6v\xear\xe3g\xe9" - """) + """, + ) parser = self.parse_file("encoded.py") assert parser.exit_counts() == {1: 1} @@ -1174,21 +1271,27 @@ def test_missing_line_ending(self) -> None: # multi-line statement has no final newline. 
# https://github.com/nedbat/coveragepy/issues/293 - self.make_file("normal.py", """\ + self.make_file( + "normal.py", + """\ out, err = some_module.some_function( ["my data", "-c", "pass"], arg1=some_module.NAME, arg2=some_module.OTHER_NAME).function() - """) + """, + ) parser = self.parse_file("normal.py") assert parser.statements == {1} - self.make_file("abrupt.py", """\ + self.make_file( + "abrupt.py", + """\ out, err = some_module.some_function( ["my data", "-c", "pass"], arg1=some_module.NAME, - arg2=some_module.OTHER_NAME).function()""") # no final newline. + arg2=some_module.OTHER_NAME).function()""", + ) # no final newline. # Double-check that some test helper wasn't being helpful. with open("abrupt.py", encoding="utf-8") as f: @@ -1220,7 +1323,7 @@ def test_os_error(self) -> None: ("__debug__ + True", (False, False)), ("x", (False, False)), ("__debug__ or debug", (False, False)), - ] + ], ) def test_is_constant_test_expr(expr: str, ret: tuple[bool, bool]) -> None: node = ast.parse(expr, mode="eval").body diff --git a/tests/test_phystokens.py b/tests/test_phystokens.py index 2140bd712..7a47e82d4 100644 --- a/tests/test_phystokens.py +++ b/tests/test_phystokens.py @@ -28,11 +28,19 @@ def foo(): """ SIMPLE_TOKENS = [ - [('com', "# yay!")], - [('key', 'def'), ('ws', ' '), ('nam', 'foo'), ('op', '('), ('op', ')'), ('op', ':')], - [('ws', ' '), ('nam', 'say'), ('op', '('), - ('str', "'two = %d'"), ('ws', ' '), ('op', '%'), - ('ws', ' '), ('num', '2'), ('op', ')')], + [("com", "# yay!")], + [("key", "def"), ("ws", " "), ("nam", "foo"), ("op", "("), ("op", ")"), ("op", ":")], + [ + ("ws", " "), + ("nam", "say"), + ("op", "("), + ("str", "'two = %d'"), + ("ws", " "), + ("op", "%"), + ("ws", " "), + ("num", "2"), + ("op", ")"), + ], ] # Mixed-white-space program, and its token stream. @@ -43,9 +51,9 @@ def hello(): """ MIXED_WS_TOKENS = [ - [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ('op', ')'), ('op', ':')], - [('ws', ' '), ('nam', 'a'), ('op', '='), ('str', '"Hello world!"')], - [('ws', ' '), ('nam', 'b'), ('op', '='), ('str', '"indented"')], + [("key", "def"), ("ws", " "), ("nam", "hello"), ("op", "("), ("op", ")"), ("op", ":")], + [("ws", " "), ("nam", "a"), ("op", "="), ("str", '"Hello world!"')], + [("ws", " "), ("nam", "b"), ("op", "="), ("str", '"indented"')], ] # https://github.com/nedbat/coveragepy/issues/822 @@ -56,6 +64,7 @@ def hello(): print( "Message 2" ) """ + class PhysTokensTest(CoverageTest): """Tests for coverage.py's improved tokenizer.""" @@ -69,7 +78,7 @@ def check_tokenization(self, source: str) -> None: tokenized += text + "\n" # source_token_lines doesn't preserve trailing spaces, so trim all that # before comparing. 
- source = source.replace('\r\n', '\n') + source = source.replace("\r\n", "\n") source = re.sub(r"(?m)[ \t]+$", "", source) tokenized = re.sub(r"(?m)[ \t]+$", "", tokenized) assert source == tokenized @@ -100,26 +109,41 @@ def test_tokenize_real_file(self) -> None: def test_1828(self) -> None: # https://github.com/nedbat/coveragepy/pull/1828 - tokens = list(source_token_lines(textwrap.dedent(""" + tokens = list( + source_token_lines( + textwrap.dedent(""" x = \ 1 a = ["aaa",\\ "bbb \\ ccc"] - """))) + """) + ) + ) assert tokens == [ [], - [('nam', 'x'), ('ws', ' '), ('op', '='), ('ws', ' '), ('num', '1')], - [('nam', 'a'), ('ws', ' '), ('op', '='), ('ws', ' '), - ('op', '['), ('str', '"aaa"'), ('op', ','), ('xx', '\\')], - [('ws', ' '), ('str', '"bbb \\')], - [('str', ' ccc"'), ('op', ']')], + [("nam", "x"), ("ws", " "), ("op", "="), ("ws", " "), ("num", "1")], + [ + ("nam", "a"), + ("ws", " "), + ("op", "="), + ("ws", " "), + ("op", "["), + ("str", '"aaa"'), + ("op", ","), + ("xx", "\\"), + ], + [("ws", " "), ("str", '"bbb \\')], + [("str", ' ccc"'), ("op", "]")], ] - @pytest.mark.parametrize("fname", [ - "stress_phystoken.tok", - "stress_phystoken_dos.tok", - ]) + @pytest.mark.parametrize( + "fname", + [ + "stress_phystoken.tok", + "stress_phystoken_dos.tok", + ], + ) def test_stress(self, fname: str) -> None: # Check the tokenization of the stress-test files. # And check that those files haven't been incorrectly "fixed". @@ -132,9 +156,13 @@ def test_stress(self, fname: str) -> None: assert re.search(r"(?m) $", fstress.read()), f"{stress} needs a trailing space." def test_fstring_middle(self) -> None: - tokens = list(source_token_lines(textwrap.dedent("""\ + tokens = list( + source_token_lines( + textwrap.dedent("""\ f'Look: {x} {{x}}!' - """))) + """) + ) + ) if env.PYBEHAVIOR.fstring_syntax: assert tokens == [ [ @@ -216,6 +244,7 @@ def test_soft_keyword_type(self) -> None: (2, b"# -*- coding:cp850 -*-\n# vim: fileencoding=cp850\n", "cp850"), ] + class SourceEncodingTest(CoverageTest): """Tests of source_encoding() for detecting encodings.""" @@ -227,7 +256,7 @@ def test_detect_source_encoding(self) -> None: def test_detect_source_encoding_not_in_comment(self) -> None: # Should not detect anything here - source = b'def parse(src, encoding=None):\n pass' + source = b"def parse(src, encoding=None):\n pass" assert source_encoding(source) == DEF_ENCODING def test_dont_detect_source_encoding_on_third_line(self) -> None: @@ -241,16 +270,16 @@ def test_detect_source_encoding_of_empty_file(self) -> None: def test_bom(self) -> None: # A BOM means utf-8. - source = b"\xEF\xBB\xBFtext = 'hello'\n" - assert source_encoding(source) == 'utf-8-sig' + source = b"\xef\xbb\xbftext = 'hello'\n" + assert source_encoding(source) == "utf-8-sig" def test_bom_with_encoding(self) -> None: - source = b"\xEF\xBB\xBF# coding: utf-8\ntext = 'hello'\n" - assert source_encoding(source) == 'utf-8-sig' + source = b"\xef\xbb\xbf# coding: utf-8\ntext = 'hello'\n" + assert source_encoding(source) == "utf-8-sig" def test_bom_is_wrong(self) -> None: # A BOM with an explicit non-utf8 encoding is an error. 
- source = b"\xEF\xBB\xBF# coding: cp850\n" + source = b"\xef\xbb\xbf# coding: cp850\n" with pytest.raises(SyntaxError, match="encoding problem: utf-8"): source_encoding(source) diff --git a/tests/test_plugins.py b/tests/test_plugins.py index a94104a01..ce378e976 100644 --- a/tests/test_plugins.py +++ b/tests/test_plugins.py @@ -32,8 +32,9 @@ class NullConfig(TPluginConfig): """A plugin configure thing when we don't really need one.""" + def get_plugin_options(self, plugin: str) -> TConfigSectionOut: - return {} # pragma: never called + return {} # pragma: never called class FakeConfig(TPluginConfig): @@ -67,7 +68,9 @@ class LoadPluginsTest(CoverageTest): """Test Plugins construction.""" def test_implicit_boolean(self) -> None: - self.make_file("plugin1.py", """\ + self.make_file( + "plugin1.py", + """\ from coverage import CoveragePlugin class Plugin(CoveragePlugin): @@ -75,7 +78,8 @@ class Plugin(CoveragePlugin): def coverage_init(reg, options): reg.add_file_tracer(Plugin()) - """) + """, + ) config = FakeConfig("plugin1", {}) plugins = make_plugins([], config) @@ -85,7 +89,9 @@ def coverage_init(reg, options): assert plugins def test_importing_and_configuring(self) -> None: - self.make_file("plugin1.py", """\ + self.make_file( + "plugin1.py", + """\ from coverage import CoveragePlugin class Plugin(CoveragePlugin): @@ -95,18 +101,21 @@ def __init__(self, options): def coverage_init(reg, options): reg.add_file_tracer(Plugin(options)) - """) + """, + ) - config = FakeConfig("plugin1", {'a': 'hello'}) + config = FakeConfig("plugin1", {"a": "hello"}) plugins = list(make_plugins(["plugin1"], config)) assert len(plugins) == 1 - assert plugins[0].this_is == "me" # type: ignore - assert plugins[0].options == {'a': 'hello'} # type: ignore - assert config.asked_for == ['plugin1'] + assert plugins[0].this_is == "me" # type: ignore + assert plugins[0].options == {"a": "hello"} # type: ignore + assert config.asked_for == ["plugin1"] def test_importing_and_configuring_more_than_one(self) -> None: - self.make_file("plugin1.py", """\ + self.make_file( + "plugin1.py", + """\ from coverage import CoveragePlugin class Plugin(CoveragePlugin): @@ -116,8 +125,11 @@ def __init__(self, options): def coverage_init(reg, options): reg.add_file_tracer(Plugin(options)) - """) - self.make_file("plugin2.py", """\ + """, + ) + self.make_file( + "plugin2.py", + """\ from coverage import CoveragePlugin class Plugin(CoveragePlugin): @@ -126,35 +138,39 @@ def __init__(self, options): def coverage_init(reg, options): reg.add_file_tracer(Plugin(options)) - """) + """, + ) - config = FakeConfig("plugin1", {'a': 'hello'}) + config = FakeConfig("plugin1", {"a": "hello"}) plugins = list(make_plugins(["plugin1", "plugin2"], config)) assert len(plugins) == 2 - assert plugins[0].this_is == "me" # type: ignore - assert plugins[0].options == {'a': 'hello'} # type: ignore - assert plugins[1].options == {} # type: ignore - assert config.asked_for == ['plugin1', 'plugin2'] + assert plugins[0].this_is == "me" # type: ignore + assert plugins[0].options == {"a": "hello"} # type: ignore + assert plugins[1].options == {} # type: ignore + assert config.asked_for == ["plugin1", "plugin2"] # The order matters... 
- config = FakeConfig("plugin1", {'a': 'second'}) + config = FakeConfig("plugin1", {"a": "second"}) plugins = list(make_plugins(["plugin2", "plugin1"], config)) assert len(plugins) == 2 - assert plugins[0].options == {} # type: ignore - assert plugins[1].this_is == "me" # type: ignore - assert plugins[1].options == {'a': 'second'} # type: ignore + assert plugins[0].options == {} # type: ignore + assert plugins[1].this_is == "me" # type: ignore + assert plugins[1].options == {"a": "second"} # type: ignore def test_cant_import(self) -> None: with pytest.raises(ImportError, match="No module named '?plugin_not_there'?"): _ = make_plugins(["plugin_not_there"], NullConfig()) def test_plugin_must_define_coverage_init(self) -> None: - self.make_file("no_plugin.py", """\ + self.make_file( + "no_plugin.py", + """\ from coverage import CoveragePlugin Nothing = 0 - """) + """, + ) msg_pat = "Plugin module 'no_plugin' didn't define a coverage_init function" with pytest.raises(PluginError, match=msg_pat): list(make_plugins(["no_plugin"], NullConfig())) @@ -165,7 +181,9 @@ class PluginTest(CoverageTest): def test_plugin_imported(self) -> None: # Prove that a plugin will be imported. - self.make_file("my_plugin.py", """\ + self.make_file( + "my_plugin.py", + """\ from coverage import CoveragePlugin class Plugin(CoveragePlugin): pass @@ -173,13 +191,14 @@ def coverage_init(reg, options): reg.add_noop(Plugin()) with open("evidence.out", "w", encoding="utf-8") as f: f.write("we are here!") - """) + """, + ) self.assert_doesnt_exist("evidence.out") cov = coverage.Coverage() cov.set_option("run:plugins", ["my_plugin"]) cov.start() - cov.stop() # pragma: nested + cov.stop() # pragma: nested with open("evidence.out", encoding="utf-8") as f: assert f.read() == "we are here!" 
@@ -202,7 +221,9 @@ def test_bad_plugin_isnt_hidden(self) -> None: cov.stop() def test_plugin_sys_info(self) -> None: - self.make_file("plugin_sys_info.py", """\ + self.make_file( + "plugin_sys_info.py", + """\ import coverage class Plugin(coverage.CoveragePlugin): @@ -211,7 +232,8 @@ def sys_info(self): def coverage_init(reg, options): reg.add_file_tracer(Plugin()) - """) + """, + ) debug_out = io.StringIO() cov = coverage.Coverage(debug=["sys"]) cov._debug_file = debug_out @@ -220,23 +242,25 @@ def coverage_init(reg, options): r"Plugin file tracers \(plugin_sys_info.Plugin\) aren't supported with .*", ): cov.start() - cov.stop() # pragma: nested + cov.stop() # pragma: nested out_lines = [line.strip() for line in debug_out.getvalue().splitlines()] if testenv.C_TRACER: - assert 'plugins.file_tracers: plugin_sys_info.Plugin' in out_lines + assert "plugins.file_tracers: plugin_sys_info.Plugin" in out_lines else: - assert 'plugins.file_tracers: plugin_sys_info.Plugin (disabled)' in out_lines - assert 'plugins.configurers: -none-' in out_lines + assert "plugins.file_tracers: plugin_sys_info.Plugin (disabled)" in out_lines + assert "plugins.configurers: -none-" in out_lines expected_end = [ "-- sys: plugin_sys_info.Plugin -------------------------------", "hello: world", "-- end -------------------------------------------------------", ] - assert expected_end == out_lines[-len(expected_end):] + assert expected_end == out_lines[-len(expected_end) :] def test_plugin_with_no_sys_info(self) -> None: - self.make_file("plugin_no_sys_info.py", """\ + self.make_file( + "plugin_no_sys_info.py", + """\ import coverage class Plugin(coverage.CoveragePlugin): @@ -244,37 +268,44 @@ class Plugin(coverage.CoveragePlugin): def coverage_init(reg, options): reg.add_configurer(Plugin()) - """) + """, + ) debug_out = io.StringIO() cov = coverage.Coverage(debug=["sys"]) cov._debug_file = debug_out cov.set_option("run:plugins", ["plugin_no_sys_info"]) cov.start() - cov.stop() # pragma: nested + cov.stop() # pragma: nested out_lines = [line.strip() for line in debug_out.getvalue().splitlines()] - assert 'plugins.file_tracers: -none-' in out_lines - assert 'plugins.configurers: plugin_no_sys_info.Plugin' in out_lines + assert "plugins.file_tracers: -none-" in out_lines + assert "plugins.configurers: plugin_no_sys_info.Plugin" in out_lines expected_end = [ "-- sys: plugin_no_sys_info.Plugin ----------------------------", "-- end -------------------------------------------------------", ] - assert expected_end == out_lines[-len(expected_end):] + assert expected_end == out_lines[-len(expected_end) :] def test_local_files_are_importable(self) -> None: - self.make_file("importing_plugin.py", """\ + self.make_file( + "importing_plugin.py", + """\ from coverage import CoveragePlugin import local_module class MyPlugin(CoveragePlugin): pass def coverage_init(reg, options): reg.add_noop(MyPlugin()) - """) + """, + ) self.make_file("local_module.py", "CONST = 1") - self.make_file(".coveragerc", """\ + self.make_file( + ".coveragerc", + """\ [run] plugins = importing_plugin - """) + """, + ) self.make_file("main_file.py", "print('MAIN')") out = self.run_command("coverage run main_file.py") @@ -284,7 +315,8 @@ def coverage_init(reg, options): def test_coverage_init_plugins(self) -> None: called = False - def coverage_init(reg: Plugins) -> None: # pylint: disable=unused-argument + + def coverage_init(reg: Plugins) -> None: # pylint: disable=unused-argument nonlocal called called = True @@ -298,6 +330,7 @@ def coverage_init(reg: 
Plugins) -> None: # pylint: disable=unused-argument @pytest.mark.skipif(testenv.PLUGINS, reason="This core doesn't support plugins.") class PluginWarningOnPyTracerTest(CoverageTest): """Test that we get a controlled exception when plugins aren't supported.""" + def test_exception_if_plugins_on_pytracer(self) -> None: self.make_file("simple.py", "a = 1") @@ -311,7 +344,7 @@ def test_exception_if_plugins_on_pytracer(self) -> None: core = "SysMonitor" expected_warnings = [ - fr"Plugin file tracers \(tests.plugin1.Plugin\) aren't supported with {core}", + rf"Plugin file tracers \(tests.plugin1.Plugin\) aren't supported with {core}", ] with self.assert_warnings(cov, expected_warnings): self.start_import_stop(cov, "simple") @@ -326,19 +359,25 @@ class GoodFileTracerTest(FileTracerTest): """Tests of file tracer plugin happy paths.""" def test_plugin1(self) -> None: - self.make_file("simple.py", """\ + self.make_file( + "simple.py", + """\ import try_xyz a = 1 b = 2 - """) - self.make_file("try_xyz.py", """\ + """, + ) + self.make_file( + "try_xyz.py", + """\ c = 3 d = 4 - """) + """, + ) cov = coverage.Coverage() - CheckUniqueFilenames.hook(cov, '_should_trace') - CheckUniqueFilenames.hook(cov, '_check_include_omit_etc') + CheckUniqueFilenames.hook(cov, "_should_trace") + CheckUniqueFilenames.hook(cov, "_check_include_omit_etc") cov.set_option("run:plugins", ["tests.plugin1"]) # Import the Python file, executing it. @@ -356,7 +395,9 @@ def make_render_and_caller(self) -> None: # plugin2 emulates a dynamic tracing plugin: the caller's locals # are examined to determine the source file and line number. # The plugin is in tests/plugin2.py. - self.make_file("render.py", """\ + self.make_file( + "render.py", + """\ def render(filename, linenum): # This function emulates a template renderer. The plugin # will examine the `filename` and `linenum` locals to @@ -368,8 +409,11 @@ def helper(x): # This function is here just to show that not all code in # this file will be part of the dynamic tracing. return x+1 - """) - self.make_file("caller.py", """\ + """, + ) + self.make_file( + "caller.py", + """\ import sys from render import helper, render @@ -383,7 +427,8 @@ def helper(x): # quux_5.html will be omitted from the results. assert render("quux_5.html", 3) == "[quux_5.html @ 3]" - """) + """, + ) # will try to read the actual source files, so make some # source files. 
@@ -398,8 +443,8 @@ def test_plugin2(self) -> None: self.make_render_and_caller() cov = coverage.Coverage(omit=["*quux*"]) - CheckUniqueFilenames.hook(cov, '_should_trace') - CheckUniqueFilenames.hook(cov, '_check_include_omit_etc') + CheckUniqueFilenames.hook(cov, "_should_trace") + CheckUniqueFilenames.hook(cov, "_check_include_omit_etc") cov.set_option("run:plugins", ["tests.plugin2"]) self.start_import_stop(cov, "caller") @@ -423,8 +468,8 @@ def test_plugin2_with_branch(self) -> None: self.make_render_and_caller() cov = coverage.Coverage(branch=True, omit=["*quux*"]) - CheckUniqueFilenames.hook(cov, '_should_trace') - CheckUniqueFilenames.hook(cov, '_check_include_omit_etc') + CheckUniqueFilenames.hook(cov, "_should_trace") + CheckUniqueFilenames.hook(cov, "_check_include_omit_etc") cov.set_option("run:plugins", ["tests.plugin2"]) self.start_import_stop(cov, "caller") @@ -452,12 +497,12 @@ def test_plugin2_with_text_report(self) -> None: total = cov.report(file=repout, include=["*.html"], omit=["uni*.html"], show_missing=True) report = repout.getvalue().splitlines() expected = [ - 'Name Stmts Miss Branch BrPart Cover Missing', - '--------------------------------------------------------', - 'bar_4.html 4 2 0 0 50% 1, 4', - 'foo_7.html 7 5 0 0 29% 1-3, 6-7', - '--------------------------------------------------------', - 'TOTAL 11 7 0 0 36%', + "Name Stmts Miss Branch BrPart Cover Missing", + "--------------------------------------------------------", + "bar_4.html 4 2 0 0 50% 1, 4", + "foo_7.html 7 5 0 0 29% 1-3, 6-7", + "--------------------------------------------------------", + "TOTAL 11 7 0 0 36%", ] assert expected == report assert math.isclose(total, 4 / 11 * 100) @@ -491,26 +536,28 @@ def test_plugin2_with_xml_report(self) -> None: dom = ElementTree.parse("coverage.xml") classes = {} for elt in dom.findall(".//class"): - classes[elt.get('name')] = elt - - assert classes['bar_4.html'].attrib == { - 'branch-rate': '1', - 'complexity': '0', - 'filename': 'bar_4.html', - 'line-rate': '0.5', - 'name': 'bar_4.html', + classes[elt.get("name")] = elt + + assert classes["bar_4.html"].attrib == { + "branch-rate": "1", + "complexity": "0", + "filename": "bar_4.html", + "line-rate": "0.5", + "name": "bar_4.html", } - assert classes['foo_7.html'].attrib == { - 'branch-rate': '1', - 'complexity': '0', - 'filename': 'foo_7.html', - 'line-rate': '0.2857', - 'name': 'foo_7.html', + assert classes["foo_7.html"].attrib == { + "branch-rate": "1", + "complexity": "0", + "filename": "foo_7.html", + "line-rate": "0.2857", + "name": "foo_7.html", } def test_defer_to_python(self) -> None: # A plugin that measures, but then wants built-in python reporting. - self.make_file("fairly_odd_plugin.py", """\ + self.make_file( + "fairly_odd_plugin.py", + """\ # A plugin that claims all the odd lines are executed, and none of # the even lines, and then punts reporting off to the built-in # Python reporting. 
@@ -535,15 +582,19 @@ def line_number_range(self, frame): def coverage_init(reg, options): reg.add_file_tracer(Plugin()) - """) - self.make_file("unsuspecting.py", """\ + """, + ) + self.make_file( + "unsuspecting.py", + """\ a = 1 b = 2 c = 3 d = 4 e = 5 f = 6 - """) + """, + ) cov = coverage.Coverage(include=["unsuspecting.py"]) cov.set_option("run:plugins", ["fairly_odd_plugin"]) self.start_import_stop(cov, "unsuspecting") @@ -552,17 +603,19 @@ def coverage_init(reg, options): total = cov.report(file=repout, show_missing=True) report = repout.getvalue().splitlines() expected = [ - 'Name Stmts Miss Cover Missing', - '-----------------------------------------------', - 'unsuspecting.py 6 3 50% 2, 4, 6', - '-----------------------------------------------', - 'TOTAL 6 3 50%', + "Name Stmts Miss Cover Missing", + "-----------------------------------------------", + "unsuspecting.py 6 3 50% 2, 4, 6", + "-----------------------------------------------", + "TOTAL 6 3 50%", ] assert expected == report assert total == 50 def test_find_unexecuted(self) -> None: - self.make_file("unexecuted_plugin.py", """\ + self.make_file( + "unexecuted_plugin.py", + """\ import os import coverage.plugin class Plugin(coverage.CoveragePlugin): @@ -592,9 +645,10 @@ def lines(self): def coverage_init(reg, options): reg.add_file_tracer(Plugin()) - """) + """, + ) self.make_file("foo.py", "a = 1") - cov = coverage.Coverage(source=['.']) + cov = coverage.Coverage(source=["."]) cov.set_option("run:plugins", ["unexecuted_plugin"]) self.start_import_stop(cov, "foo") @@ -625,23 +679,32 @@ def run_plugin(self, module_name: str) -> Coverage: Returns the Coverage object. """ - self.make_file("simple.py", """\ + self.make_file( + "simple.py", + """\ import other, another a = other.f(2) b = other.f(3) c = another.g(4) d = another.g(5) - """) + """, + ) # The names of these files are important: some plugins apply themselves # to "*other.py". 
- self.make_file("other.py", """\ + self.make_file( + "other.py", + """\ def f(x): return x+1 - """) - self.make_file("another.py", """\ + """, + ) + self.make_file( + "another.py", + """\ def g(x): return x-1 - """) + """, + ) cov = coverage.Coverage() cov.set_option("run:plugins", [module_name]) @@ -697,21 +760,26 @@ def run_bad_plugin( if excmsg: assert excmsg in stderr if excmsgs: - found_exc = any(em in stderr for em in excmsgs) # pragma: part covered + found_exc = any(em in stderr for em in excmsgs) # pragma: part covered assert found_exc, f"expected one of {excmsgs} in stderr" def test_file_tracer_has_no_file_tracer_method(self) -> None: - self.make_file("bad_plugin.py", """\ + self.make_file( + "bad_plugin.py", + """\ class Plugin(object): pass def coverage_init(reg, options): reg.add_file_tracer(Plugin()) - """) + """, + ) self.run_bad_plugin("bad_plugin", "Plugin", our_error=False) def test_file_tracer_has_inherited_sourcefilename_method(self) -> None: - self.make_file("bad_plugin.py", """\ + self.make_file( + "bad_plugin.py", + """\ import coverage class Plugin(coverage.CoveragePlugin): def file_tracer(self, filename): @@ -723,14 +791,19 @@ class FileTracer(coverage.FileTracer): def coverage_init(reg, options): reg.add_file_tracer(Plugin()) - """) + """, + ) self.run_bad_plugin( - "bad_plugin", "Plugin", our_error=False, + "bad_plugin", + "Plugin", + our_error=False, excmsg="Class 'bad_plugin.FileTracer' needs to implement source_filename()", ) def test_plugin_has_inherited_filereporter_method(self) -> None: - self.make_file("bad_plugin.py", """\ + self.make_file( + "bad_plugin.py", + """\ import coverage class Plugin(coverage.CoveragePlugin): def file_tracer(self, filename): @@ -743,14 +816,17 @@ def source_filename(self): def coverage_init(reg, options): reg.add_file_tracer(Plugin()) - """) + """, + ) cov = self.run_plugin("bad_plugin") expected_msg = "Plugin 'bad_plugin.Plugin' needs to implement file_reporter()" with pytest.raises(NotImplementedError, match=expected_msg): cov.report() def test_file_tracer_fails(self) -> None: - self.make_file("bad_plugin.py", """\ + self.make_file( + "bad_plugin.py", + """\ import coverage.plugin class Plugin(coverage.plugin.CoveragePlugin): def file_tracer(self, filename): @@ -758,13 +834,16 @@ def file_tracer(self, filename): def coverage_init(reg, options): reg.add_file_tracer(Plugin()) - """) + """, + ) self.run_bad_plugin("bad_plugin", "Plugin") def test_file_tracer_fails_eventually(self) -> None: # Django coverage plugin can report on a few files and then fail. 
# https://github.com/nedbat/coveragepy/issues/1011 - self.make_file("bad_plugin.py", """\ + self.make_file( + "bad_plugin.py", + """\ import os.path import coverage.plugin class Plugin(coverage.plugin.CoveragePlugin): @@ -789,11 +868,14 @@ def line_number_range(self, frame): def coverage_init(reg, options): reg.add_file_tracer(Plugin()) - """) + """, + ) self.run_bad_plugin("bad_plugin", "Plugin") def test_file_tracer_returns_wrong(self) -> None: - self.make_file("bad_plugin.py", """\ + self.make_file( + "bad_plugin.py", + """\ import coverage.plugin class Plugin(coverage.plugin.CoveragePlugin): def file_tracer(self, filename): @@ -801,13 +883,19 @@ def file_tracer(self, filename): def coverage_init(reg, options): reg.add_file_tracer(Plugin()) - """) + """, + ) self.run_bad_plugin( - "bad_plugin", "Plugin", our_error=False, excmsg="'float' object has no attribute", + "bad_plugin", + "Plugin", + our_error=False, + excmsg="'float' object has no attribute", ) def test_has_dynamic_source_filename_fails(self) -> None: - self.make_file("bad_plugin.py", """\ + self.make_file( + "bad_plugin.py", + """\ import coverage.plugin class Plugin(coverage.plugin.CoveragePlugin): def file_tracer(self, filename): @@ -819,11 +907,14 @@ def has_dynamic_source_filename(self): def coverage_init(reg, options): reg.add_file_tracer(Plugin()) - """) + """, + ) self.run_bad_plugin("bad_plugin", "Plugin") def test_source_filename_fails(self) -> None: - self.make_file("bad_plugin.py", """\ + self.make_file( + "bad_plugin.py", + """\ import coverage.plugin class Plugin(coverage.plugin.CoveragePlugin): def file_tracer(self, filename): @@ -835,11 +926,14 @@ def source_filename(self): def coverage_init(reg, options): reg.add_file_tracer(Plugin()) - """) + """, + ) self.run_bad_plugin("bad_plugin", "Plugin") def test_source_filename_returns_wrong(self) -> None: - self.make_file("bad_plugin.py", """\ + self.make_file( + "bad_plugin.py", + """\ import coverage.plugin class Plugin(coverage.plugin.CoveragePlugin): def file_tracer(self, filename): @@ -851,9 +945,12 @@ def source_filename(self): def coverage_init(reg, options): reg.add_file_tracer(Plugin()) - """) + """, + ) self.run_bad_plugin( - "bad_plugin", "Plugin", our_error=False, + "bad_plugin", + "Plugin", + our_error=False, excmsgs=[ "expected str, bytes or os.PathLike object, not float", "'float' object has no attribute", @@ -863,7 +960,9 @@ def coverage_init(reg, options): ) def test_dynamic_source_filename_fails(self) -> None: - self.make_file("bad_plugin.py", """\ + self.make_file( + "bad_plugin.py", + """\ import coverage.plugin class Plugin(coverage.plugin.CoveragePlugin): def file_tracer(self, filename): @@ -878,11 +977,14 @@ def dynamic_source_filename(self, filename, frame): def coverage_init(reg, options): reg.add_file_tracer(Plugin()) - """) + """, + ) self.run_bad_plugin("bad_plugin", "Plugin") def test_line_number_range_raises_error(self) -> None: - self.make_file("bad_plugin.py", """\ + self.make_file( + "bad_plugin.py", + """\ import coverage.plugin class Plugin(coverage.plugin.CoveragePlugin): def file_tracer(self, filename): @@ -898,13 +1000,19 @@ def line_number_range(self, frame): def coverage_init(reg, options): reg.add_file_tracer(Plugin()) - """) + """, + ) self.run_bad_plugin( - "bad_plugin", "Plugin", our_error=False, excmsg="borked!", + "bad_plugin", + "Plugin", + our_error=False, + excmsg="borked!", ) def test_line_number_range_returns_non_tuple(self) -> None: - self.make_file("bad_plugin.py", """\ + self.make_file( + "bad_plugin.py", + """\ 
import coverage.plugin class Plugin(coverage.plugin.CoveragePlugin): def file_tracer(self, filename): @@ -920,13 +1028,19 @@ def line_number_range(self, frame): def coverage_init(reg, options): reg.add_file_tracer(Plugin()) - """) + """, + ) self.run_bad_plugin( - "bad_plugin", "Plugin", our_error=False, excmsg="line_number_range must return 2-tuple", + "bad_plugin", + "Plugin", + our_error=False, + excmsg="line_number_range must return 2-tuple", ) def test_line_number_range_returns_triple(self) -> None: - self.make_file("bad_plugin.py", """\ + self.make_file( + "bad_plugin.py", + """\ import coverage.plugin class Plugin(coverage.plugin.CoveragePlugin): def file_tracer(self, filename): @@ -942,13 +1056,19 @@ def line_number_range(self, frame): def coverage_init(reg, options): reg.add_file_tracer(Plugin()) - """) + """, + ) self.run_bad_plugin( - "bad_plugin", "Plugin", our_error=False, excmsg="line_number_range must return 2-tuple", + "bad_plugin", + "Plugin", + our_error=False, + excmsg="line_number_range must return 2-tuple", ) def test_line_number_range_returns_pair_of_strings(self) -> None: - self.make_file("bad_plugin.py", """\ + self.make_file( + "bad_plugin.py", + """\ import coverage.plugin class Plugin(coverage.plugin.CoveragePlugin): def file_tracer(self, filename): @@ -964,9 +1084,12 @@ def line_number_range(self, frame): def coverage_init(reg, options): reg.add_file_tracer(Plugin()) - """) + """, + ) self.run_bad_plugin( - "bad_plugin", "Plugin", our_error=False, + "bad_plugin", + "Plugin", + our_error=False, excmsgs=[ "an integer is required", "cannot be interpreted as an integer", @@ -983,7 +1106,7 @@ def test_configurer_plugin(self) -> None: cov = coverage.Coverage() cov.set_option("run:plugins", ["tests.plugin_config"]) cov.start() - cov.stop() # pragma: nested + cov.stop() # pragma: nested excluded = cov.get_option("report:exclude_lines") assert isinstance(excluded, list) assert "pragma: custom" in excluded @@ -996,7 +1119,9 @@ class DynamicContextPluginTest(CoverageTest): def make_plugin_capitalized_testnames(self, filename: str) -> None: """Create a dynamic context plugin that capitalizes the part after 'test_'.""" - self.make_file(filename, """\ + self.make_file( + filename, + """\ from coverage import CoveragePlugin class Plugin(CoveragePlugin): @@ -1009,11 +1134,14 @@ def dynamic_context(self, frame): def coverage_init(reg, options): reg.add_dynamic_context(Plugin()) - """) + """, + ) def make_plugin_track_render(self, filename: str) -> None: """Make a dynamic context plugin that tracks 'render_' functions.""" - self.make_file(filename, """\ + self.make_file( + filename, + """\ from coverage import CoveragePlugin class Plugin(CoveragePlugin): @@ -1025,11 +1153,14 @@ def dynamic_context(self, frame): def coverage_init(reg, options): reg.add_dynamic_context(Plugin()) - """) + """, + ) def make_test_files(self) -> None: """Make some files to use while testing dynamic context plugins.""" - self.make_file("rendering.py", """\ + self.make_file( + "rendering.py", + """\ def html_tag(tag, content): return f'<{tag}>{content}' @@ -1041,9 +1172,12 @@ def render_span(text): def render_bold(text): return html_tag('b', text) - """) + """, + ) - self.make_file("testsuite.py", """\ + self.make_file( + "testsuite.py", + """\ import rendering def test_html_tag() -> None: @@ -1064,9 +1198,10 @@ def build_full_html(): rendering.render_paragraph( rendering.render_span('hello'))) return html - """) + """, + ) - def run_all_functions(self, cov: Coverage, suite_name: str) -> None: # pragma: 
nested + def run_all_functions(self, cov: Coverage, suite_name: str) -> None: # pragma: nested """Run all functions in `suite_name` under coverage.""" cov.start() suite = import_local_file(suite_name) @@ -1080,60 +1215,60 @@ def run_all_functions(self, cov: Coverage, suite_name: str) -> None: # pragma cov.stop() def test_plugin_standalone(self) -> None: - self.make_plugin_capitalized_testnames('plugin_tests.py') + self.make_plugin_capitalized_testnames("plugin_tests.py") self.make_test_files() # Enable dynamic context plugin cov = coverage.Coverage() - cov.set_option("run:plugins", ['plugin_tests']) + cov.set_option("run:plugins", ["plugin_tests"]) # Run the tests - self.run_all_functions(cov, 'testsuite') + self.run_all_functions(cov, "testsuite") # Labeled coverage is collected data = cov.get_data() filenames = self.get_measured_filenames(data) - expected = ['', 'doctest:HTML_TAG', 'test:HTML_TAG', 'test:RENDERERS'] + expected = ["", "doctest:HTML_TAG", "test:HTML_TAG", "test:RENDERERS"] assert expected == sorted(data.measured_contexts()) data.set_query_context("doctest:HTML_TAG") - assert [2] == sorted_lines(data, filenames['rendering.py']) + assert [2] == sorted_lines(data, filenames["rendering.py"]) data.set_query_context("test:HTML_TAG") - assert [2] == sorted_lines(data, filenames['rendering.py']) + assert [2] == sorted_lines(data, filenames["rendering.py"]) data.set_query_context("test:RENDERERS") - assert [2, 5, 8, 11] == sorted_lines(data, filenames['rendering.py']) + assert [2, 5, 8, 11] == sorted_lines(data, filenames["rendering.py"]) def test_static_context(self) -> None: - self.make_plugin_capitalized_testnames('plugin_tests.py') + self.make_plugin_capitalized_testnames("plugin_tests.py") self.make_test_files() # Enable dynamic context plugin for coverage with named context - cov = coverage.Coverage(context='mytests') - cov.set_option("run:plugins", ['plugin_tests']) + cov = coverage.Coverage(context="mytests") + cov.set_option("run:plugins", ["plugin_tests"]) # Run the tests - self.run_all_functions(cov, 'testsuite') + self.run_all_functions(cov, "testsuite") # Static context prefix is preserved data = cov.get_data() expected = [ - 'mytests', - 'mytests|doctest:HTML_TAG', - 'mytests|test:HTML_TAG', - 'mytests|test:RENDERERS', + "mytests", + "mytests|doctest:HTML_TAG", + "mytests|test:HTML_TAG", + "mytests|test:RENDERERS", ] assert expected == sorted(data.measured_contexts()) def test_plugin_with_test_function(self) -> None: - self.make_plugin_capitalized_testnames('plugin_tests.py') + self.make_plugin_capitalized_testnames("plugin_tests.py") self.make_test_files() # Enable both a plugin and test_function dynamic context cov = coverage.Coverage() - cov.set_option("run:plugins", ['plugin_tests']) + cov.set_option("run:plugins", ["plugin_tests"]) cov.set_option("run:dynamic_context", "test_function") # Run the tests - self.run_all_functions(cov, 'testsuite') + self.run_all_functions(cov, "testsuite") # test_function takes precedence over plugins - only # functions that are not labeled by test_function are @@ -1141,31 +1276,31 @@ def test_plugin_with_test_function(self) -> None: data = cov.get_data() filenames = self.get_measured_filenames(data) expected = [ - '', - 'doctest:HTML_TAG', - 'testsuite.test_html_tag', - 'testsuite.test_renderers', + "", + "doctest:HTML_TAG", + "testsuite.test_html_tag", + "testsuite.test_renderers", ] assert expected == sorted(data.measured_contexts()) def assert_context_lines(context: str, lines: list[TLineNo]) -> None: 
data.set_query_context(context) - assert lines == sorted_lines(data, filenames['rendering.py']) + assert lines == sorted_lines(data, filenames["rendering.py"]) assert_context_lines("doctest:HTML_TAG", [2]) assert_context_lines("testsuite.test_html_tag", [2]) assert_context_lines("testsuite.test_renderers", [2, 5, 8, 11]) def test_multiple_plugins(self) -> None: - self.make_plugin_capitalized_testnames('plugin_tests.py') - self.make_plugin_track_render('plugin_renderers.py') + self.make_plugin_capitalized_testnames("plugin_tests.py") + self.make_plugin_track_render("plugin_renderers.py") self.make_test_files() # Enable two plugins cov = coverage.Coverage() - cov.set_option("run:plugins", ['plugin_renderers', 'plugin_tests']) + cov.set_option("run:plugins", ["plugin_renderers", "plugin_tests"]) - self.run_all_functions(cov, 'testsuite') + self.run_all_functions(cov, "testsuite") # It is important to note, that line 11 (render_bold function) is never # labeled as renderer:bold context, because it is only called from @@ -1177,18 +1312,18 @@ def test_multiple_plugins(self) -> None: data = cov.get_data() filenames = self.get_measured_filenames(data) expected = [ - '', - 'doctest:HTML_TAG', - 'renderer:paragraph', - 'renderer:span', - 'test:HTML_TAG', - 'test:RENDERERS', + "", + "doctest:HTML_TAG", + "renderer:paragraph", + "renderer:span", + "test:HTML_TAG", + "test:RENDERERS", ] assert expected == sorted(data.measured_contexts()) def assert_context_lines(context: str, lines: list[TLineNo]) -> None: data.set_query_context(context) - assert lines == sorted_lines(data, filenames['rendering.py']) + assert lines == sorted_lines(data, filenames["rendering.py"]) assert_context_lines("test:HTML_TAG", [2]) assert_context_lines("test:RENDERERS", [2, 5, 8, 11]) diff --git a/tests/test_process.py b/tests/test_process.py index 3b700e533..353784574 100644 --- a/tests/test_process.py +++ b/tests/test_process.py @@ -37,10 +37,13 @@ class ProcessTest(CoverageTest): """Tests of the per-process behavior of coverage.py.""" def test_save_on_exit(self) -> None: - self.make_file("mycode.py", """\ + self.make_file( + "mycode.py", + """\ h = "Hello" w = "world" - """) + """, + ) self.assert_doesnt_exist(".coverage") self.run_command("coverage run mycode.py") @@ -48,25 +51,31 @@ def test_save_on_exit(self) -> None: def test_tests_dir_is_importable(self) -> None: # Checks that we can import modules from the tests directory at all! - self.make_file("mycode.py", """\ + self.make_file( + "mycode.py", + """\ import covmod1 import covmodzip1 a = 1 print('done') - """) + """, + ) self.assert_doesnt_exist(".coverage") self.add_test_modules_to_pythonpath() out = self.run_command("coverage run mycode.py") self.assert_exists(".coverage") - assert out == 'done\n' + assert out == "done\n" def test_coverage_run_envvar_is_in_coveragerun(self) -> None: # Test that we are setting COVERAGE_RUN when we run. - self.make_file("envornot.py", """\ + self.make_file( + "envornot.py", + """\ import os print(os.getenv("COVERAGE_RUN", "nope")) - """) + """, + ) self.del_environ("COVERAGE_RUN") # Regular Python doesn't have the environment variable. out = self.run_command("python envornot.py") @@ -81,7 +90,9 @@ def make_b_or_c_py(self) -> None: # "b_or_c.py b" will run 6 lines. # "b_or_c.py c" will run 7 lines. # Together, they run 8 lines. 
- self.make_file("b_or_c.py", """\ + self.make_file( + "b_or_c.py", + """\ import sys a = 2 if sys.argv[1] == 'b': @@ -91,18 +102,19 @@ def make_b_or_c_py(self) -> None: c2 = 7 d = 8 print('done') - """) + """, + ) def test_append_data(self) -> None: self.make_b_or_c_py() out = self.run_command("coverage run b_or_c.py b") - assert out == 'done\n' + assert out == "done\n" self.assert_exists(".coverage") self.assert_file_count(".coverage.*", 0) out = self.run_command("coverage run --append b_or_c.py c") - assert out == 'done\n' + assert out == "done\n" self.assert_exists(".coverage") self.assert_file_count(".coverage.*", 0) @@ -110,23 +122,26 @@ def test_append_data(self) -> None: # executed. data = coverage.CoverageData() data.read() - assert line_counts(data)['b_or_c.py'] == 8 + assert line_counts(data)["b_or_c.py"] == 8 def test_append_data_with_different_file(self) -> None: self.make_b_or_c_py() - self.make_file(".coveragerc", """\ + self.make_file( + ".coveragerc", + """\ [run] data_file = .mycovdata - """) + """, + ) out = self.run_command("coverage run b_or_c.py b") - assert out == 'done\n' + assert out == "done\n" self.assert_doesnt_exist(".coverage") self.assert_exists(".mycovdata") out = self.run_command("coverage run --append b_or_c.py c") - assert out == 'done\n' + assert out == "done\n" self.assert_doesnt_exist(".coverage") self.assert_exists(".mycovdata") @@ -134,13 +149,13 @@ def test_append_data_with_different_file(self) -> None: # executed. data = coverage.CoverageData(".mycovdata") data.read() - assert line_counts(data)['b_or_c.py'] == 8 + assert line_counts(data)["b_or_c.py"] == 8 def test_append_can_create_a_data_file(self) -> None: self.make_b_or_c_py() out = self.run_command("coverage run --append b_or_c.py b") - assert out == 'done\n' + assert out == "done\n" self.assert_exists(".coverage") self.assert_file_count(".coverage.*", 0) @@ -148,23 +163,26 @@ def test_append_can_create_a_data_file(self) -> None: # executed. data = coverage.CoverageData() data.read() - assert line_counts(data)['b_or_c.py'] == 6 + assert line_counts(data)["b_or_c.py"] == 6 def test_combine_with_rc(self) -> None: self.make_b_or_c_py() - self.make_file(".coveragerc", """\ + self.make_file( + ".coveragerc", + """\ [run] source = . parallel = true - """) + """, + ) out = self.run_command("coverage run b_or_c.py b") - assert out == 'done\n' + assert out == "done\n" self.assert_doesnt_exist(".coverage") out = self.run_command("coverage run b_or_c.py c") - assert out == 'done\n' + assert out == "done\n" self.assert_doesnt_exist(".coverage") # After two runs, there should be two .coverage.machine.123 files. @@ -182,7 +200,7 @@ def test_combine_with_rc(self) -> None: # executed. data = coverage.CoverageData() data.read() - assert line_counts(data)['b_or_c.py'] == 8 + assert line_counts(data)["b_or_c.py"] == 8 # Reporting should still work even with the .rc file out = self.run_command("coverage report") @@ -195,22 +213,30 @@ def test_combine_with_rc(self) -> None: """) def test_combine_with_aliases(self) -> None: - self.make_file("d1/x.py", """\ + self.make_file( + "d1/x.py", + """\ a = 1 b = 2 print(f"{a} {b}") - """) + """, + ) - self.make_file("d2/x.py", """\ + self.make_file( + "d2/x.py", + """\ # 1 # 2 # 3 c = 4 d = 5 print(f"{c} {d}") - """) + """, + ) - self.make_file(".coveragerc", """\ + self.make_file( + ".coveragerc", + """\ [run] source = . 
parallel = True @@ -220,12 +246,13 @@ def test_combine_with_aliases(self) -> None: src */d1 */d2 - """) + """, + ) out = self.run_command("coverage run " + os.path.normpath("d1/x.py")) - assert out == '1 2\n' + assert out == "1 2\n" out = self.run_command("coverage run " + os.path.normpath("d2/x.py")) - assert out == '4 5\n' + assert out == "4 5\n" self.assert_file_count(".coverage.*", 2) @@ -244,16 +271,19 @@ def test_combine_with_aliases(self) -> None: summary = line_counts(data, fullpath=True) assert len(summary) == 1 actual = abs_file(list(summary.keys())[0]) - expected = abs_file('src/x.py') + expected = abs_file("src/x.py") assert expected == actual assert list(summary.values())[0] == 6 def test_erase_parallel(self) -> None: - self.make_file(".coveragerc", """\ + self.make_file( + ".coveragerc", + """\ [run] data_file = data.dat parallel = True - """) + """, + ) self.make_file("data.dat") self.make_file("data.dat.fooey") self.make_file("data.dat.gooey") @@ -267,9 +297,12 @@ def test_erase_parallel(self) -> None: def test_missing_source_file(self) -> None: # Check what happens if the source is missing when reporting happens. - self.make_file("fleeting.py", """\ + self.make_file( + "fleeting.py", + """\ s = 'goodbye, cruel world!' - """) + """, + ) self.run_command("coverage run fleeting.py") os.remove("fleeting.py") @@ -279,9 +312,12 @@ def test_missing_source_file(self) -> None: # It happens that the code paths are different for *.py and other # files, so try again with no extension. - self.make_file("fleeting", """\ + self.make_file( + "fleeting", + """\ s = 'goodbye, cruel world!' - """) + """, + ) self.run_command("coverage run fleeting") os.remove("fleeting") @@ -297,7 +333,9 @@ def test_running_missing_file(self) -> None: assert status == 1 def test_code_throws(self) -> None: - self.make_file("throw.py", """\ + self.make_file( + "throw.py", + """\ class MyException(Exception): pass @@ -308,7 +346,8 @@ def f2(): f1() f2() - """) + """, + ) # The important thing is for "coverage run" and "python" to report the # same traceback. @@ -320,13 +359,15 @@ def f2(): assert out == out2 # But also make sure that the output is what we expect. - path = python_reported_file('throw.py') + path = python_reported_file("throw.py") msg = f'File "{re.escape(path)}", line 8, in f2' assert re.search(msg, out) assert 'raise MyException("hey!")' in out def test_code_exits(self) -> None: - self.make_file("exit.py", """\ + self.make_file( + "exit.py", + """\ import sys def f1(): print("about to exit..") @@ -336,7 +377,8 @@ def f2(): f1() f2() - """) + """, + ) # The important thing is for "coverage run" and "python" to have the # same output. No traceback. 
@@ -348,14 +390,17 @@ def f2(): assert status == 17 def test_code_exits_no_arg(self) -> None: - self.make_file("exit_none.py", """\ + self.make_file( + "exit_none.py", + """\ import sys def f1(): print("about to exit quietly..") sys.exit() f1() - """) + """, + ) status, out = self.run_command_status("coverage run exit_none.py") status2, out2 = self.run_command_status("python exit_none.py") assert out == out2 @@ -365,7 +410,9 @@ def f1(): @pytest.mark.skipif(not hasattr(os, "fork"), reason="Can't test os.fork, it doesn't exist.") def test_fork(self) -> None: - self.make_file("fork.py", """\ + self.make_file( + "fork.py", + """\ import os print(f"parent,{os.getpid()}", flush=True) @@ -375,12 +422,13 @@ def test_fork(self) -> None: print(f"child,{os.getpid()}", flush=True) else: os.waitpid(ret, 0) - """) + """, + ) total_lines = 6 self.set_environ("COVERAGE_DEBUG_FILE", "debug.out") out = self.run_command("coverage run --debug=pid,process,trace -p fork.py") - pids = {key:int(pid) for key, pid in csv.reader(out.splitlines())} + pids = {key: int(pid) for key, pid in csv.reader(out.splitlines())} assert set(pids) == {"parent", "child"} self.assert_doesnt_exist(".coverage") @@ -415,9 +463,9 @@ def test_fork(self) -> None: ppid = pids["parent"] cpid = pids["child"] assert ppid != cpid - plines = re_lines(fr"{ppid}\.[0-9a-f]+: New process: pid={ppid}, executable", debug_text) + plines = re_lines(rf"{ppid}\.[0-9a-f]+: New process: pid={ppid}, executable", debug_text) assert len(plines) == 1 - clines = re_lines(fr"{cpid}\.[0-9a-f]+: New process: forked {ppid} -> {cpid}", debug_text) + clines = re_lines(rf"{cpid}\.[0-9a-f]+: New process: forked {ppid} -> {cpid}", debug_text) assert len(clines) == 1 reported_pids = {line.split(".")[0] for line in debug_text.splitlines()} assert len(reported_pids) == 2 @@ -425,7 +473,9 @@ def test_fork(self) -> None: @pytest.mark.skipif(not hasattr(os, "fork"), reason="Can't test os.fork, it doesn't exist.") @pytest.mark.parametrize("patch", [False, True]) def test_os_exit(self, patch: bool) -> None: - self.make_file("forky.py", """\ + self.make_file( + "forky.py", + """\ import os import tempfile import time @@ -447,8 +497,9 @@ def test_os_exit(self, patch: bool) -> None: os._exit(0) # if this exists, then we broke os._exit completely open("impossible.txt", mode="w") - """) - total_lines = 17 # don't count the last impossible.txt line + """, + ) + total_lines = 17 # don't count the last impossible.txt line if patch: self.make_file(".coveragerc", "[run]\npatch = _exit\n") self.run_command("coverage run -p forky.py") @@ -465,19 +516,28 @@ def test_os_exit(self, patch: bool) -> None: def test_warnings_during_reporting(self) -> None: # While fixing issue #224, the warnings were being printed far too # often. Make sure they're not any more. - self.make_file("hello.py", """\ + self.make_file( + "hello.py", + """\ import sys, os, the_other print("Hello") - """) - self.make_file("the_other.py", """\ + """, + ) + self.make_file( + "the_other.py", + """\ print("What?") - """) - self.make_file(".coveragerc", """\ + """, + ) + self.make_file( + ".coveragerc", + """\ [run] source = . xyzzy - """) + """, + ) self.run_command("coverage run hello.py") out = self.run_command("coverage html") @@ -488,15 +548,14 @@ def test_warns_if_never_run(self) -> None: # absolute path of the file will have "warning" in it, and an assertion # will fail. 
out = self.run_command("coverage run i_dont_exist.py", status=1) - path = python_reported_file('i_dont_exist.py') + path = python_reported_file("i_dont_exist.py") assert f"No file to run: '{path}'" in out assert "warning" not in out assert "Exception" not in out out = self.run_command("coverage run -m no_such_module", status=1) - assert ( - ("No module named no_such_module" in out) or - ("No module named 'no_such_module'" in out) + assert ("No module named no_such_module" in out) or ( + "No module named 'no_such_module'" in out ) assert "warning" not in out assert "Exception" not in out @@ -505,7 +564,9 @@ def test_warns_if_never_run(self) -> None: def test_warnings_trace_function_changed_with_threads(self) -> None: # https://github.com/nedbat/coveragepy/issues/164 - self.make_file("bug164.py", """\ + self.make_file( + "bug164.py", + """\ import threading import time @@ -516,7 +577,8 @@ def run(self): thr = MyThread() thr.start() thr.join() - """) + """, + ) out = self.run_command("coverage run --timid bug164.py") assert "Hello\n" in out @@ -524,12 +586,15 @@ def run(self): @pytest.mark.skipif(env.METACOV, reason="Can't test tracers changing during metacoverage") def test_warning_trace_function_changed(self) -> None: - self.make_file("settrace.py", """\ + self.make_file( + "settrace.py", + """\ import sys print("Hello") sys.settrace(None) print("Goodbye") - """) + """, + ) out = self.run_command("coverage run --timid settrace.py") assert "Hello\n" in out assert "Goodbye\n" in out @@ -549,7 +614,9 @@ def test_timid(self) -> None: # an environment variable set in igor.py to know whether to expect to see # the C trace function or not. - self.make_file("showtrace.py", """\ + self.make_file( + "showtrace.py", + """\ # Show the current frame's trace function, so that we can test what the # command-line options do to the trace function used. @@ -570,7 +637,8 @@ def test_timid(self) -> None: trace_name = trace_fn.__class__.__name__ print(trace_name) - """) + """, + ) # When running without coverage, no trace function py_out = self.run_command("python showtrace.py") @@ -593,26 +661,32 @@ def test_timid(self) -> None: assert timid_out == "PyTracer\n" def test_warn_preimported(self) -> None: - self.make_file("hello.py", """\ + self.make_file( + "hello.py", + """\ import goodbye import coverage cov = coverage.Coverage(include=["good*"], check_preimported=True) cov.start() print(goodbye.f()) cov.stop() - """) - self.make_file("goodbye.py", """\ + """, + ) + self.make_file( + "goodbye.py", + """\ def f(): return "Goodbye!" - """) + """, + ) goodbye_path = os.path.abspath("goodbye.py") out = self.run_command("python hello.py") assert "Goodbye!" in out msg = ( - f"CoverageWarning: Already imported a file that will be measured: {goodbye_path} " + - "(already-imported)" + f"CoverageWarning: Already imported a file that will be measured: {goodbye_path} " + + "(already-imported)" ) assert msg in out @@ -625,13 +699,16 @@ def test_lang_c(self) -> None: # with strange characters, though, because that gets the test runners # tangled up. This will isolate the concerns to the coverage.py code. 
# https://github.com/nedbat/coveragepy/issues/533 - self.make_file("weird_file.py", r""" + self.make_file( + "weird_file.py", + r""" globs = {} code = "a = 1\nb = 2\n" exec(compile(code, "wut\xe9\xea\xeb\xec\x01\x02.py", 'exec'), globs) print(globs['a']) print(globs['b']) - """) + """, + ) self.set_environ("LANG", "C") out = self.run_command("coverage run weird_file.py") assert out == "1\n2\n" @@ -639,12 +716,15 @@ def test_lang_c(self) -> None: def test_deprecation_warnings(self) -> None: # Test that coverage doesn't trigger deprecation warnings. # https://github.com/nedbat/coveragepy/issues/305 - self.make_file("allok.py", """\ + self.make_file( + "allok.py", + """\ import warnings warnings.simplefilter('default') import coverage print("No warnings!") - """) + """, + ) # Some of our testing infrastructure can issue warnings. # Turn it all off for the subprocess. @@ -655,11 +735,16 @@ def test_deprecation_warnings(self) -> None: def test_run_twice(self) -> None: # https://github.com/nedbat/coveragepy/issues/353 - self.make_file("foo.py", """\ + self.make_file( + "foo.py", + """\ def foo(): pass - """) - self.make_file("run_twice.py", """\ + """, + ) + self.make_file( + "run_twice.py", + """\ import sys import coverage @@ -671,16 +756,17 @@ def foo(): import foo inst.stop() inst.save() - """) + """, + ) out = self.run_command("python run_twice.py") # Remove the file location and source line from the warning. out = re.sub(r"(?m)^[\\/\w.:~_-]+:\d+: CoverageWarning: ", "f:d: CoverageWarning: ", out) out = re.sub(r"(?m)^\s+self.warn.*$\n", "", out) expected = ( - "Run 1\n" + - "Run 2\n" + - "f:d: CoverageWarning: Module foo was previously imported, but not measured " + - "(module-not-measured)\n" + "Run 1\n" + + "Run 2\n" + + "f:d: CoverageWarning: Module foo was previously imported, but not measured " + + "(module-not-measured)\n" ) assert expected == out @@ -696,7 +782,9 @@ def test_module_name(self) -> None: @pytest.mark.skipif(env.WINDOWS, reason="This test is not for Windows") def test_save_signal_usr1(self) -> None: self.assert_doesnt_exist(".coverage") - self.make_file("dummy_hello.py", """\ + self.make_file( + "dummy_hello.py", + """\ import os import signal @@ -705,7 +793,8 @@ def test_save_signal_usr1(self) -> None: os.kill(os.getpid(), signal.SIGKILL) print("Done and goodbye") - """) + """, + ) out = self.run_command( "coverage run --save-signal=USR1 dummy_hello.py", status=-signal.SIGKILL, @@ -725,6 +814,7 @@ def test_save_signal_usr1(self) -> None: TRY_EXECFILE = os.path.join(os.path.dirname(__file__), "modules/process_test/try_execfile.py") + class EnvironmentTest(CoverageTest): """Tests using try_execfile.py to test the execution environment.""" @@ -814,10 +904,13 @@ def test_coverage_run_dashm_superset_of_doubledashsource(self) -> None: # This is because process_test/__init__.py is imported while looking # for process_test.try_execfile. That import happens while setting # sys.path before start() is called. - self.make_file(".coveragerc", """\ + self.make_file( + ".coveragerc", + """\ [run] disable_warnings = module-not-measured - """) + """, + ) self.add_test_modules_to_pythonpath() expected = self.run_command("python -m process_test.try_execfile") actual = self.run_command( @@ -835,10 +928,13 @@ def test_coverage_run_script_imports_doubledashsource(self) -> None: # have __file__ == "try_execfile.pyc", which throws off the comparison. # Setting dont_write_bytecode True stops the compilation to .pyc and # keeps the test working. 
- self.make_file("myscript", """\ + self.make_file( + "myscript", + """\ import sys; sys.dont_write_bytecode = True import process_test.try_execfile - """) + """, + ) self.add_test_modules_to_pythonpath() expected = self.run_command("python myscript") @@ -915,11 +1011,16 @@ def test_coverage_custom_script(self) -> None: # https://github.com/nedbat/coveragepy/issues/678 # If sys.path[0] isn't the Python default, then coverage.py won't # fiddle with it. - self.make_file("a/b/c/thing.py", """\ + self.make_file( + "a/b/c/thing.py", + """\ SOMETHING = "hello-xyzzy" - """) + """, + ) abc = os.path.abspath("a/b/c") - self.make_file("run_coverage.py", f"""\ + self.make_file( + "run_coverage.py", + f"""\ import sys sys.path[0:0] = [ r'{abc}', @@ -930,13 +1031,17 @@ def test_coverage_custom_script(self) -> None: if __name__ == '__main__': sys.exit(coverage.cmdline.main()) - """) - self.make_file("how_is_it.py", """\ + """, + ) + self.make_file( + "how_is_it.py", + """\ import pprint, sys pprint.pprint(sys.path) import thing print(thing.SOMETHING) - """) + """, + ) # If this test fails, it will be with "can't import thing". out = self.run_command("python run_coverage.py run how_is_it.py") assert "hello-xyzzy" in out @@ -953,7 +1058,9 @@ def test_coverage_custom_script(self) -> None: def test_bug_862(self) -> None: # This used to simulate how pyenv and pyenv-virtualenv create the # coverage executable. Now the code shows how venv does it. - self.make_file("elsewhere/bin/fake-coverage", f"""\ + self.make_file( + "elsewhere/bin/fake-coverage", + f"""\ #!{sys.executable} import re import sys @@ -961,7 +1068,8 @@ def test_bug_862(self) -> None: if __name__ == '__main__': sys.argv[0] = re.sub(r'(-script\\.pyw|\\.exe)?$', '', sys.argv[0]) sys.exit(main()) - """) + """, + ) os.chmod("elsewhere/bin/fake-coverage", stat.S_IREAD | stat.S_IEXEC) os.symlink("elsewhere", "somewhere") self.make_file("foo.py", "print('inside foo')") @@ -1004,7 +1112,9 @@ class ExcepthookTest(CoverageTest): # TODO: do we need these as process tests if we have test_execfile.py:RunFileTest? 
def test_excepthook(self) -> None: - self.make_file("excepthook.py", """\ + self.make_file( + "excepthook.py", + """\ import sys def excepthook(*args): @@ -1016,7 +1126,8 @@ def excepthook(*args): maybe = 1 raise RuntimeError('Error Outside') - """) + """, + ) cov_st, cov_out = self.run_command_status("coverage run excepthook.py") py_st, py_out = self.run_command_status("python excepthook.py") assert cov_st == py_st @@ -1030,14 +1141,17 @@ def excepthook(*args): data.read() print(f"{line_counts(data) = }") print(f"{data = }") - print("data.lines excepthook.py:", data.lines(os.path.abspath('excepthook.py'))) - assert line_counts(data)['excepthook.py'] == 7 + print("data.lines excepthook.py:", data.lines(os.path.abspath("excepthook.py"))) + assert line_counts(data)["excepthook.py"] == 7 - @pytest.mark.skipif(not env.CPYTHON, + @pytest.mark.skipif( + not env.CPYTHON, reason="non-CPython handles excepthook exits differently, punt for now.", ) def test_excepthook_exit(self) -> None: - self.make_file("excepthook_exit.py", """\ + self.make_file( + "excepthook_exit.py", + """\ import sys def excepthook(*args): @@ -1047,7 +1161,8 @@ def excepthook(*args): sys.excepthook = excepthook raise RuntimeError('Error Outside') - """) + """, + ) cov_st, cov_out = self.run_command_status("coverage run excepthook_exit.py") py_st, py_out = self.run_command_status("python excepthook_exit.py") assert cov_st == py_st @@ -1058,7 +1173,9 @@ def excepthook(*args): @pytest.mark.skipif(env.PYPY, reason="PyPy handles excepthook throws differently.") def test_excepthook_throw(self) -> None: - self.make_file("excepthook_throw.py", """\ + self.make_file( + "excepthook_throw.py", + """\ import sys def excepthook(*args): @@ -1071,7 +1188,8 @@ def excepthook(*args): sys.excepthook = excepthook raise RuntimeError('Error Outside') - """) + """, + ) cov_out = self.run_command("coverage run excepthook_throw.py", status=1) py_out = self.run_command("python excepthook_throw.py", status=1) assert "in excepthook" in py_out @@ -1091,7 +1209,7 @@ def test_major_version_works(self) -> None: def test_wrong_alias_doesnt_work(self) -> None: # "coverage2" doesn't work on py3 - assert sys.version_info[0] == 3 # Let us know when Python 4 is out... + assert sys.version_info[0] == 3 # Let us know when Python 4 is out... badcmd = "coverage2" status, out = self.run_command_status(badcmd) assert "Code coverage for Python" not in out @@ -1103,11 +1221,14 @@ def test_specific_alias_works(self) -> None: out = self.run_command(cmd) assert "Code coverage for Python" in out - @pytest.mark.parametrize("cmd", [ - "coverage", - "coverage%d" % sys.version_info[0], - "coverage-%d.%d" % sys.version_info[:2], - ]) + @pytest.mark.parametrize( + "cmd", + [ + "coverage", + "coverage%d" % sys.version_info[0], + "coverage-%d.%d" % sys.version_info[:2], + ], + ) def test_aliases_used_in_messages(self, cmd: str) -> None: out = self.run_command(f"{cmd} foobar", status=1) assert "Unknown command: 'foobar'" in out @@ -1143,7 +1264,9 @@ class FailUnderTest(CoverageTest): def setUp(self) -> None: super().setUp() - self.make_file("forty_two_plus.py", """\ + self.make_file( + "forty_two_plus.py", + """\ # I have 42.857% (3/7) coverage! 
a = 1 b = 2 @@ -1152,7 +1275,8 @@ def setUp(self) -> None: c = 5 d = 6 e = 7 - """) + """, + ) self.make_data_file(lines={abs_file("forty_two_plus.py"): [2, 3, 4]}) def test_report_43_is_ok(self) -> None: @@ -1175,11 +1299,9 @@ def test_report_42p86_is_not_ok(self) -> None: def test_report_99p9_is_not_ok(self) -> None: # A file with 99.9% coverage: - self.make_file("ninety_nine_plus.py", - "a = 1\n" + - "b = 2\n" * 2000 + - "if a > 3:\n" + - " c = 4\n", + self.make_file( + "ninety_nine_plus.py", + "a = 1\n" + "b = 2\n" * 2000 + "if a > 3:\n" + " c = 4\n", ) self.make_data_file(lines={abs_file("ninety_nine_plus.py"): range(1, 2002)}) st, out = self.run_command_status("coverage report --fail-under=100") @@ -1190,9 +1312,11 @@ def test_report_99p9_is_not_ok(self) -> None: class CoverageCoreTest(CoverageTest): """Test that cores are chosen correctly.""" + # This doesn't test failure modes, only successful requests. try: from coverage.tracer import CTracer + has_ctracer = True except ImportError: has_ctracer = False @@ -1267,15 +1391,17 @@ def test_core_request_nosuchcore(self) -> None: class FailUnderNoFilesTest(CoverageTest): """Test that nothing to report results in an error exit status.""" + def test_report(self) -> None: self.make_file(".coveragerc", "[report]\nfail_under = 99\n") st, out = self.run_command_status("coverage report") - assert 'No data to report.' in out + assert "No data to report." in out assert st == 1 class FailUnderEmptyFilesTest(CoverageTest): """Test that empty files produce the proper fail_under exit status.""" + def test_report(self) -> None: self.make_file(".coveragerc", "[report]\nfail_under = 99\n") self.make_file("empty.py", "") @@ -1309,7 +1435,7 @@ def test_removing_directory(self) -> None: def test_removing_directory_with_error(self) -> None: self.make_file("bug806.py", self.BUG_806) out = self.run_command("coverage run bug806.py", status=1) - path = python_reported_file('bug806.py') + path = python_reported_file("bug806.py") # Python 3.11 adds an extra line to the traceback. # Check that the lines we expect are there. lines = textwrap.dedent(f"""\ @@ -1329,24 +1455,33 @@ class ProcessStartupTest(CoverageTest): def make_main_and_sub(self) -> None: """Create main.py and sub.py.""" # Main will run sub.py - self.make_file("main.py", """\ + self.make_file( + "main.py", + """\ import os, os.path, sys ex = os.path.basename(sys.executable) os.system(ex + " sub.py") - """) + """, + ) # sub.py will write a few lines. - self.make_file("sub.py", """\ + self.make_file( + "sub.py", + """\ f = open("out.txt", "w", encoding="utf-8") f.write("Hello, world!\\n") f.close() - """) + """, + ) def test_patch_subprocess(self) -> None: self.make_main_and_sub() - self.make_file(".coveragerc", """\ + self.make_file( + ".coveragerc", + """\ [run] patch = subprocess - """) + """, + ) self.run_command("coverage run main.py") self.run_command("coverage combine") self.assert_exists(".coverage") @@ -1361,15 +1496,18 @@ def test_subprocess_with_pth_files(self, _create_pth_file: None) -> None: # it. 
self.make_main_and_sub() data = coverage.CoverageData(".mycovdata") - data.add_lines({os.path.abspath('sub.py'): range(100)}) + data.add_lines({os.path.abspath("sub.py"): range(100)}) data.write() - self.make_file("coverage.ini", """\ + self.make_file( + "coverage.ini", + """\ [run] data_file = .mycovdata - """) + """, + ) self.set_environ("COVERAGE_PROCESS_START", "coverage.ini") - import main # pylint: disable=unused-import, import-error + import main # pylint: disable=unused-import, import-error with open("out.txt", encoding="utf-8") as f: assert f.read() == "Hello, world!\n" @@ -1378,15 +1516,18 @@ def test_subprocess_with_pth_files(self, _create_pth_file: None) -> None: self.assert_exists(".mycovdata") data = coverage.CoverageData(".mycovdata") data.read() - assert line_counts(data)['sub.py'] == 3 + assert line_counts(data)["sub.py"] == 3 def test_subprocess_with_pth_files_and_parallel(self, _create_pth_file: None) -> None: # https://github.com/nedbat/coveragepy/issues/492 self.make_main_and_sub() - self.make_file("coverage.ini", """\ + self.make_file( + "coverage.ini", + """\ [run] parallel = true - """) + """, + ) self.set_environ("COVERAGE_PROCESS_START", "coverage.ini") self.run_command("coverage run main.py") @@ -1400,20 +1541,22 @@ def test_subprocess_with_pth_files_and_parallel(self, _create_pth_file: None) -> self.assert_exists(".coverage") data = coverage.CoverageData() data.read() - assert line_counts(data)['sub.py'] == 3 + assert line_counts(data)["sub.py"] == 3 # assert that there are *no* extra data files left over after a combine - data_files = glob.glob(os.getcwd() + '/.coverage*') + data_files = glob.glob(os.getcwd() + "/.coverage*") msg = ( - "Expected only .coverage after combine, looks like there are " + - f"extra data files that were not cleaned up: {data_files!r}" + "Expected only .coverage after combine, looks like there are " + + f"extra data files that were not cleaned up: {data_files!r}" ) assert len(data_files) == 1, msg def test_subprocess_in_directories(self) -> None: # Bug 2025: patch=subprocess didn't find data files from subdirectory # subprocesses. 
- self.make_file("main.py", """\ + self.make_file( + "main.py", + """\ import subprocess import sys print(subprocess.check_output( @@ -1421,16 +1564,23 @@ def test_subprocess_in_directories(self) -> None: cwd="subdir", encoding="utf-8", )) - """) - self.make_file("subdir/subproc.py", """\ + """, + ) + self.make_file( + "subdir/subproc.py", + """\ with open("readme.txt", encoding="utf-8") as f: print(f.read(), end="") - """) - self.make_file(".coveragerc", """\ + """, + ) + self.make_file( + ".coveragerc", + """\ [run] patch = subprocess data_file = .covdata - """) + """, + ) self.make_file("subdir/readme.txt", "hello") out = self.run_command("coverage run main.py") assert out == "hello\n" @@ -1442,7 +1592,9 @@ def test_subprocess_in_directories(self) -> None: def test_subprocess_gets_nonfile_config(self) -> None: # https://github.com/nedbat/coveragepy/issues/2021 - self.make_file("subfunctions.py", """\ + self.make_file( + "subfunctions.py", + """\ import subprocess, sys def f1(): @@ -1458,12 +1610,16 @@ def f2(): subprocess.call([sys.executable, __file__, c]) else: functions[int(cases[0])]() - """) - self.make_file(".coveragerc", """\ + """, + ) + self.make_file( + ".coveragerc", + """\ [run] disable_warnings = no-sysmon patch = subprocess - """) + """, + ) out = self.run_command("coverage run --branch subfunctions.py 0 1") assert out.endswith("function 1\nfunction 2\n") self.run_command("coverage combine") @@ -1499,18 +1655,27 @@ def _clean_pth_files() -> Iterable[None]: class ExecvTest(CoverageTest): """Test that we can measure coverage in subprocesses.""" - @pytest.mark.parametrize("fname", - [base + suffix for base, suffix in itertools.product( - ["exec", "spawn"], - ["l", "le", "lp", "lpe", "v", "ve", "vp", "vpe"], - )] + @pytest.mark.parametrize( + "fname", + [ + base + suffix + for base, suffix in itertools.product( + ["exec", "spawn"], + ["l", "le", "lp", "lpe", "v", "ve", "vp", "vpe"], + ) + ], ) def test_execv_patch(self, fname: str, _clean_pth_files: None) -> None: - self.make_file(".coveragerc", """\ + self.make_file( + ".coveragerc", + """\ [run] patch = subprocess, execv - """) - self.make_file("main.py", f"""\ + """, + ) + self.make_file( + "main.py", + f"""\ import os, sys print("In main") args = [] @@ -1527,13 +1692,17 @@ def test_execv_patch(self, fname: str, _clean_pth_files: None) -> None: os.environ["MAINVAR"] = "the-main-var" sys.stdout.flush() os.{fname}(*args) - """) - self.make_file("other.py", """\ + """, + ) + self.make_file( + "other.py", + """\ import os, sys print(f"MAINVAR = {os.getenv('MAINVAR', 'none')}") print(f"SUBVAR = {os.getenv('SUBVAR', 'none')}") print(f"{sys.argv[1:] = }") - """) + """, + ) out = self.run_command("coverage run main.py") expected = "In main\n" @@ -1599,10 +1768,11 @@ def test_pth_and_source_work_together( ``--source`` argument. """ + def fullname(modname: str) -> str: """What is the full module name for `modname` for this test?""" if package and dashm: - return '.'.join((package, modname)) + return ".".join((package, modname)) else: return modname @@ -1611,30 +1781,41 @@ def path(basename: str) -> str: return os.path.join(package, basename) # Main will run sub.py. - self.make_file(path("main.py"), """\ + self.make_file( + path("main.py"), + """\ import %s a = 2 b = 3 - """ % fullname('sub')) + """ + % fullname("sub"), + ) if package: self.make_file(path("__init__.py"), "") # sub.py will write a few lines. 
- self.make_file(path("sub.py"), """\ + self.make_file( + path("sub.py"), + """\ f = open("out.txt", "w", encoding="utf-8") f.write("Hello, world!") f.close() - """) - self.make_file("coverage.ini", """\ + """, + ) + self.make_file( + "coverage.ini", + """\ [run] source = %s - """ % fullname(source)) + """ + % fullname(source), + ) self.set_environ("COVERAGE_PROCESS_START", "coverage.ini") if dashm: - cmd = "python -m %s" % fullname('main') + cmd = "python -m %s" % fullname("main") else: - cmd = "python %s" % path('main.py') + cmd = "python %s" % path("main.py") self.run_command(cmd) @@ -1646,5 +1827,5 @@ def path(basename: str) -> str: data = coverage.CoverageData() data.read() summary = line_counts(data) - assert summary[source + '.py'] == 3 + assert summary[source + ".py"] == 3 assert len(summary) == 1 diff --git a/tests/test_python.py b/tests/test_python.py index 0b0361fe4..49d28fdcf 100644 --- a/tests/test_python.py +++ b/tests/test_python.py @@ -30,24 +30,24 @@ class GetZipBytesTest(CoverageTest): def test_get_encoded_zip_files(self, encoding: str) -> None: # See igor.py, do_zipmods, for the text of these files. zip_file = "tests/zipmods.zip" - sys.path.append(zip_file) # So we can import the files. + sys.path.append(zip_file) # So we can import the files. filename = zip_file + "/encoded_" + encoding + ".py" filename = os_sep(filename) zip_data = get_zip_bytes(filename) assert zip_data is not None zip_text = zip_data.decode(encoding) - assert 'All OK' in zip_text + assert "All OK" in zip_text # Run the code to see that we really got it encoded properly. - mod = __import__("encoded_"+encoding) + mod = __import__("encoded_" + encoding) assert mod.encoding == encoding def test_source_for_file(tmp_path: pathlib.Path) -> None: src = str(tmp_path / "a.py") assert source_for_file(src) == src - assert source_for_file(src + 'c') == src - assert source_for_file(src + 'o') == src - unknown = src + 'FOO' + assert source_for_file(src + "c") == src + assert source_for_file(src + "o") == src + unknown = src + "FOO" assert source_for_file(unknown) == unknown @@ -59,11 +59,11 @@ def test_source_for_file_windows(tmp_path: pathlib.Path) -> None: # On windows if a pyw exists, it is an acceptable source path_windows = tmp_path / "a.pyw" path_windows.write_text("", encoding="utf-8") - assert str(path_windows) == source_for_file(src + 'c') + assert str(path_windows) == source_for_file(src + "c") # If both pyw and py exist, py is preferred a_py.write_text("", encoding="utf-8") - assert source_for_file(src + 'c') == src + assert source_for_file(src + "c") == src class RunpyTest(CoverageTest): diff --git a/tests/test_regions.py b/tests/test_regions.py index 67792d6c3..1654cc96b 100644 --- a/tests/test_regions.py +++ b/tests/test_regions.py @@ -18,7 +18,8 @@ def test_code_regions() -> None: - regions = code_regions(textwrap.dedent("""\ + regions = code_regions( + textwrap.dedent("""\ # Numbers in this code are the line number. 
'''Module docstring''' @@ -62,29 +63,32 @@ def method_d(self): async def afunc(): x = 43 - """)) + """) + ) F = "function" C = "class" - assert sorted(regions) == sorted([ - CodeRegion(F, "MyClass.__init__", start=8, lines={9}), - CodeRegion(F, "MyClass.method_a", start=11, lines={12, 13, 21}), - CodeRegion(F, "MyClass.method_a.inmethod", start=13, lines={14, 15, 16, 18, 19}), - CodeRegion(F, "MyClass.method_a.inmethod.DeepInside.method_b", start=16, lines={17}), - CodeRegion(F, "MyClass.method_a.inmethod.DeepInside.Deeper.bb", start=19, lines={20}), - CodeRegion(F, "MyClass.InnerClass.method_c", start=25, lines={26}), - CodeRegion(F, "func", start=28, lines={29, 30, 31, 35, 36, 37, 39, 40}), - CodeRegion(F, "func.inner", start=31, lines={32, 33}), - CodeRegion(F, "func.inner.inner_inner", start=33, lines={34}), - CodeRegion(F, "func.InsideFunc.method_d", start=37, lines={38}), - CodeRegion(F, "afunc", start=42, lines={43}), - CodeRegion(C, "MyClass", start=5, lines={9, 12, 13, 14, 15, 16, 18, 19, 21}), - CodeRegion(C, "MyClass.method_a.inmethod.DeepInside", start=15, lines={17}), - CodeRegion(C, "MyClass.method_a.inmethod.DeepInside.Deeper", start=18, lines={20}), - CodeRegion(C, "MyClass.InnerClass", start=23, lines={26}), - CodeRegion(C, "func.InsideFunc", start=36, lines={38}), - ]) + assert sorted(regions) == sorted( + [ + CodeRegion(F, "MyClass.__init__", start=8, lines={9}), + CodeRegion(F, "MyClass.method_a", start=11, lines={12, 13, 21}), + CodeRegion(F, "MyClass.method_a.inmethod", start=13, lines={14, 15, 16, 18, 19}), + CodeRegion(F, "MyClass.method_a.inmethod.DeepInside.method_b", start=16, lines={17}), + CodeRegion(F, "MyClass.method_a.inmethod.DeepInside.Deeper.bb", start=19, lines={20}), + CodeRegion(F, "MyClass.InnerClass.method_c", start=25, lines={26}), + CodeRegion(F, "func", start=28, lines={29, 30, 31, 35, 36, 37, 39, 40}), + CodeRegion(F, "func.inner", start=31, lines={32, 33}), + CodeRegion(F, "func.inner.inner_inner", start=33, lines={34}), + CodeRegion(F, "func.InsideFunc.method_d", start=37, lines={38}), + CodeRegion(F, "afunc", start=42, lines={43}), + CodeRegion(C, "MyClass", start=5, lines={9, 12, 13, 14, 15, 16, 18, 19, 21}), + CodeRegion(C, "MyClass.method_a.inmethod.DeepInside", start=15, lines={17}), + CodeRegion(C, "MyClass.method_a.inmethod.DeepInside.Deeper", start=18, lines={20}), + CodeRegion(C, "MyClass.InnerClass", start=23, lines={26}), + CodeRegion(C, "func.InsideFunc", start=36, lines={38}), + ] + ) def test_real_code_regions() -> None: @@ -96,11 +100,9 @@ def test_real_code_regions() -> None: regions = code_regions(source) for kind in ["function", "class"]: kind_regions = [reg for reg in regions if reg.kind == kind] - line_counts = collections.Counter( - lno for reg in kind_regions for lno in reg.lines - ) + line_counts = collections.Counter(lno for reg in kind_regions for lno in reg.lines) overlaps = [line for line, count in line_counts.items() if count > 1] - if overlaps: # pragma: only failure + if overlaps: # pragma: only failure print( f"{kind.title()} overlaps in {source_file.relative_to(Path.cwd())}: " + f"{overlaps}" diff --git a/tests/test_report.py b/tests/test_report.py index a24578457..e9d35bb32 100644 --- a/tests/test_report.py +++ b/tests/test_report.py @@ -34,18 +34,21 @@ class SummaryTest(UsingModulesMixin, CoverageTest): def make_mycode(self) -> None: """Make the mycode.py file when needed.""" - self.make_file("mycode.py", """\ + self.make_file( + "mycode.py", + """\ import covmod1 import covmodzip1 a = 1 print('done') - """) + 
""", + ) def test_report(self) -> None: self.make_mycode() cov = coverage.Coverage() self.start_import_stop(cov, "mycode") - assert self.stdout() == 'done\n' + assert self.stdout() == "done\n" report = self.get_report(cov) # Name Stmts Miss Cover @@ -150,10 +153,13 @@ def test_report_include_relative_files_and_path(self) -> None: Ref: https://github.com/nedbat/coveragepy/issues/1604 """ self.make_mycode() - self.make_file(".coveragerc", """\ + self.make_file( + ".coveragerc", + """\ [run] relative_files = true - """) + """, + ) self.make_file("submodule/mycode.py", "import mycode") cov = coverage.Coverage() @@ -171,10 +177,13 @@ def test_report_include_relative_files_and_path(self) -> None: def test_report_include_relative_files_and_wildcard_path(self) -> None: self.make_mycode() - self.make_file(".coveragerc", """\ + self.make_file( + ".coveragerc", + """\ [run] relative_files = true - """) + """, + ) self.make_file("submodule/mycode.py", "import nested.submodule.mycode") self.make_file("nested/submodule/mycode.py", "import mycode") @@ -199,11 +208,14 @@ def test_omit_files_here(self) -> None: # https://github.com/nedbat/coveragepy/issues/1407 self.make_file("foo.py", "") self.make_file("bar/bar.py", "") - self.make_file("tests/test_baz.py", """\ + self.make_file( + "tests/test_baz.py", + """\ def test_foo(): assert True test_foo() - """) + """, + ) self.run_command("coverage run --source=. --omit='./*.py' -m tests.test_baz") report = self.report_from_command("coverage report") @@ -221,13 +233,16 @@ def test_foo(): def test_run_source_vs_report_include(self) -> None: # https://github.com/nedbat/coveragepy/issues/621 - self.make_file(".coveragerc", """\ + self.make_file( + ".coveragerc", + """\ [run] source = . [report] include = mod/*,tests/* - """) + """, + ) # It should be OK to use that configuration. cov = coverage.Coverage() with self.assert_warnings(cov, []): @@ -238,13 +253,16 @@ def test_run_omit_vs_report_omit(self) -> None: # https://github.com/nedbat/coveragepy/issues/622 # report:omit shouldn't clobber run:omit. 
self.make_mycode() - self.make_file(".coveragerc", """\ + self.make_file( + ".coveragerc", + """\ [run] omit = */covmodzip1.py [report] omit = */covmod1.py - """) + """, + ) self.add_test_modules_to_pythonpath() self.run_command("coverage run mycode.py") @@ -256,16 +274,19 @@ def test_run_omit_vs_report_omit(self) -> None: assert "covmodzip1.py" not in files def test_report_branches(self) -> None: - self.make_file("mybranch.py", """\ + self.make_file( + "mybranch.py", + """\ def branch(x): if x: print("x") return x branch(1) - """) + """, + ) cov = coverage.Coverage(source=["."], branch=True) self.start_import_stop(cov, "mybranch") - assert self.stdout() == 'x\n' + assert self.stdout() == "x\n" report = self.get_report(cov) # Name Stmts Miss Branch BrPart Cover @@ -278,7 +299,9 @@ def branch(x): assert self.last_line_squeezed(report) == "TOTAL 5 0 2 1 86%" def test_report_show_missing(self) -> None: - self.make_file("mymissing.py", """\ + self.make_file( + "mymissing.py", + """\ def missing(x, y): if x: print("x") @@ -293,10 +316,11 @@ def missing(x, y): pass return x missing(0, 1) - """) + """, + ) cov = coverage.Coverage(source=["."]) self.start_import_stop(cov, "mymissing") - assert self.stdout() == 'y\nz\n' + assert self.stdout() == "y\nz\n" report = self.get_report(cov, show_missing=True) # Name Stmts Miss Cover Missing @@ -311,17 +335,20 @@ def missing(x, y): assert squeezed[4] == "TOTAL 14 3 79%" def test_report_show_missing_branches(self) -> None: - self.make_file("mybranch.py", """\ + self.make_file( + "mybranch.py", + """\ def branch(x, y): if x: print("x") if y: print("y") branch(1, 1) - """) + """, + ) cov = coverage.Coverage(branch=True) self.start_import_stop(cov, "mybranch") - assert self.stdout() == 'x\ny\n' + assert self.stdout() == "x\ny\n" report = self.get_report(cov, show_missing=True) # Name Stmts Miss Branch BrPart Cover Missing @@ -336,10 +363,15 @@ def branch(x, y): assert squeezed[4] == "TOTAL 6 0 4 2 80%" def test_report_show_missing_branches_and_lines(self) -> None: - self.make_file("main.py", """\ + self.make_file( + "main.py", + """\ import mybranch - """) - self.make_file("mybranch.py", """\ + """, + ) + self.make_file( + "mybranch.py", + """\ def branch(x, y, z): if x: print("x") @@ -350,34 +382,41 @@ def branch(x, y, z): print("z") return x branch(1, 1, 0) - """) + """, + ) cov = coverage.Coverage(branch=True) self.start_import_stop(cov, "main") - assert self.stdout() == 'x\ny\n' + assert self.stdout() == "x\ny\n" report_lines = self.get_report(cov, squeeze=False, show_missing=True).splitlines() expected = [ - 'Name Stmts Miss Branch BrPart Cover Missing', - '---------------------------------------------------------', - 'main.py 1 0 0 0 100%', - 'mybranch.py 10 2 8 3 61% 2->4, 4->6, 7-8', - '---------------------------------------------------------', - 'TOTAL 11 2 8 3 63%', + "Name Stmts Miss Branch BrPart Cover Missing", + "---------------------------------------------------------", + "main.py 1 0 0 0 100%", + "mybranch.py 10 2 8 3 61% 2->4, 4->6, 7-8", + "---------------------------------------------------------", + "TOTAL 11 2 8 3 63%", ] assert expected == report_lines def test_report_skip_covered_no_branches(self) -> None: - self.make_file("main.py", """\ + self.make_file( + "main.py", + """\ import not_covered def normal(): print("z") normal() - """) - self.make_file("not_covered.py", """\ + """, + ) + self.make_file( + "not_covered.py", + """\ def not_covered(): print("n") - """) + """, + ) # --fail-under is handled by cmdline.py, use real commands. 
out = self.run_command("coverage run main.py") assert out == "z\n" @@ -399,7 +438,9 @@ def not_covered(): assert self.last_command_status == 0 def test_report_skip_covered_branches(self) -> None: - self.make_file("main.py", """\ + self.make_file( + "main.py", + """\ import not_covered, covered def normal(z): @@ -407,18 +448,25 @@ def normal(z): print("z") normal(True) normal(False) - """) - self.make_file("not_covered.py", """\ + """, + ) + self.make_file( + "not_covered.py", + """\ def not_covered(n): if n: print("n") not_covered(True) - """) - self.make_file("covered.py", """\ + """, + ) + self.make_file( + "covered.py", + """\ def foo(): pass foo() - """) + """, + ) cov = coverage.Coverage(branch=True) self.start_import_stop(cov, "main") assert self.stdout() == "n\nz\n" @@ -439,7 +487,9 @@ def foo(): assert squeezed[6] == "2 files skipped due to complete coverage." def test_report_skip_covered_branches_with_totals(self) -> None: - self.make_file("main.py", """\ + self.make_file( + "main.py", + """\ import not_covered import also_not_run @@ -448,17 +498,24 @@ def normal(z): print("z") normal(True) normal(False) - """) - self.make_file("not_covered.py", """\ + """, + ) + self.make_file( + "not_covered.py", + """\ def not_covered(n): if n: print("n") not_covered(True) - """) - self.make_file("also_not_run.py", """\ + """, + ) + self.make_file( + "also_not_run.py", + """\ def does_not_appear_in_this_film(ni): print("Ni!") - """) + """, + ) cov = coverage.Coverage(branch=True) self.start_import_stop(cov, "main") assert self.stdout() == "n\nz\n" @@ -481,11 +538,14 @@ def does_not_appear_in_this_film(ni): assert squeezed[7] == "1 file skipped due to complete coverage." def test_report_skip_covered_all_files_covered(self) -> None: - self.make_file("main.py", """\ + self.make_file( + "main.py", + """\ def foo(): pass foo() - """) + """, + ) cov = coverage.Coverage(source=["."], branch=True) self.start_import_stop(cov, "main") assert self.stdout() == "" @@ -511,13 +571,13 @@ def foo(): assert self.line_count(report) == 5, report assert report.split("\n")[0] == ( - '| Name | Stmts | Miss | Branch | BrPart | Cover |' + "| Name | Stmts | Miss | Branch | BrPart | Cover |" ) assert report.split("\n")[1] == ( - '|---------- | -------: | -------: | -------: | -------: | -------: |' + "|---------- | -------: | -------: | -------: | -------: | -------: |" ) assert report.split("\n")[2] == ( - '| **TOTAL** | **3** | **0** | **0** | **0** | **100%** |' + "| **TOTAL** | **3** | **0** | **0** | **0** | **100%** |" ) squeezed = self.squeezed_lines(report) assert squeezed[4] == "1 file skipped due to complete coverage." 
@@ -526,11 +586,14 @@ def foo(): assert total == "100\n" def test_report_skip_covered_longfilename(self) -> None: - self.make_file("long_______________filename.py", """\ + self.make_file( + "long_______________filename.py", + """\ def foo(): pass foo() - """) + """, + ) cov = coverage.Coverage(source=["."], branch=True) self.start_import_stop(cov, "long_______________filename") assert self.stdout() == "" @@ -556,13 +619,16 @@ def test_report_skip_covered_no_data(self) -> None: self.assert_doesnt_exist(".coverage") def test_report_skip_empty(self) -> None: - self.make_file("main.py", """\ + self.make_file( + "main.py", + """\ import submodule def normal(): print("z") normal() - """) + """, + ) self.make_file("submodule/__init__.py", "") cov = coverage.Coverage() self.start_import_stop(cov, "main") @@ -601,12 +667,17 @@ def test_report_skip_empty_no_data(self) -> None: assert report.split("\n")[4] == "1 empty file skipped." def test_report_precision(self) -> None: - self.make_file(".coveragerc", """\ + self.make_file( + ".coveragerc", + """\ [report] precision = 3 omit = */site-packages/* - """) - self.make_file("main.py", """\ + """, + ) + self.make_file( + "main.py", + """\ import not_covered, covered def normal(z): @@ -614,18 +685,25 @@ def normal(z): print("z") normal(True) normal(False) - """) - self.make_file("not_covered.py", """\ + """, + ) + self.make_file( + "not_covered.py", + """\ def not_covered(n): if n: print("n") not_covered(True) - """) - self.make_file("covered.py", """\ + """, + ) + self.make_file( + "covered.py", + """\ def foo(): pass foo() - """) + """, + ) cov = coverage.Coverage(branch=True) self.start_import_stop(cov, "main") assert self.stdout() == "n\nz\n" @@ -646,11 +724,14 @@ def foo(): assert squeezed[6] == "TOTAL 13 0 4 1 94.118%" def test_report_precision_all_zero(self) -> None: - self.make_file("not_covered.py", """\ + self.make_file( + "not_covered.py", + """\ def not_covered(n): if n: print("n") - """) + """, + ) self.make_file("empty.py", "") cov = coverage.Coverage(source=["."]) self.start_import_stop(cov, "empty") @@ -669,16 +750,22 @@ def not_covered(n): assert "TOTAL 3 3 0.000000%" in report def test_report_module_docstrings(self) -> None: - self.make_file("main.py", """\ + self.make_file( + "main.py", + """\ # Line 1 '''Line 2 docstring.''' import other a = 4 - """) - self.make_file("other.py", """\ + """, + ) + self.make_file( + "other.py", + """\ '''Line 1''' a = 2 - """) + """, + ) cov = coverage.Coverage() self.start_import_stop(cov, "main") report = self.get_report(cov) @@ -713,11 +800,11 @@ def test_accented_directory(self) -> None: self.make_file("\xe2/accented.py", "print('accented')") self.make_data_file(lines={abs_file("\xe2/accented.py"): [1]}) report_expected = ( - "Name Stmts Miss Cover\n" + - "-----------------------------------\n" + - "\xe2/accented.py 1 0 100%\n" + - "-----------------------------------\n" + - "TOTAL 1 0 100%\n" + "Name Stmts Miss Cover\n" + + "-----------------------------------\n" + + "\xe2/accented.py 1 0 100%\n" + + "-----------------------------------\n" + + "TOTAL 1 0 100%\n" ) cov = coverage.Coverage() cov.load() @@ -766,7 +853,9 @@ def test_dothtml_not_python(self) -> None: self.get_report(cov, morfs=["mycode.html"]) def test_report_no_extension(self) -> None: - self.make_file("xxx", """\ + self.make_file( + "xxx", + """\ # This is a python file though it doesn't look like it, like a main script. 
a = b = c = d = 0 a = 3 @@ -775,7 +864,8 @@ def test_report_no_extension(self) -> None: c = 6 d = 7 print(f"xxx: {a} {b} {c} {d}") - """) + """, + ) self.make_data_file(lines={abs_file("xxx"): [2, 3, 4, 5, 7, 8]}) cov = coverage.Coverage() cov.load() @@ -783,13 +873,16 @@ def test_report_no_extension(self) -> None: assert self.last_line_squeezed(report) == "TOTAL 7 1 86%" def test_report_with_chdir(self) -> None: - self.make_file("chdir.py", """\ + self.make_file( + "chdir.py", + """\ import os print("Line One") os.chdir("subdir") print("Line Two") print(open("something", encoding="utf-8").read()) - """) + """, + ) self.make_file("subdir/something", "hello") out = self.run_command("coverage run --source=. chdir.py") assert out == "Line One\nLine Two\nhello\n" @@ -800,16 +893,22 @@ def test_report_with_chdir(self) -> None: def test_bug_156_file_not_run_should_be_zero(self) -> None: # https://github.com/nedbat/coveragepy/issues/156 - self.make_file("mybranch.py", """\ + self.make_file( + "mybranch.py", + """\ def branch(x): if x: print("x") return x branch(1) - """) - self.make_file("main.py", """\ + """, + ) + self.make_file( + "main.py", + """\ print("y") - """) + """, + ) cov = coverage.Coverage(branch=True, source=["."]) self.start_import_stop(cov, "main") report = self.get_report(cov).splitlines() @@ -840,17 +939,23 @@ def test_bug_203_mixed_case_listed_twice(self) -> None: @pytest.mark.skipif(not env.WINDOWS, reason=".pyw files are only on Windows.") def test_pyw_files(self) -> None: # https://github.com/nedbat/coveragepy/issues/261 - self.make_file("start.pyw", """\ + self.make_file( + "start.pyw", + """\ import mod print("In start.pyw") - """) - self.make_file("mod.pyw", """\ + """, + ) + self.make_file( + "mod.pyw", + """\ print("In mod.pyw") - """) + """, + ) cov = coverage.Coverage() # start_import_stop can't import the .pyw file, so use the long form. 
with cov.collect(): - import start # pylint: disable=import-error, unused-import + import start # pylint: disable=import-error, unused-import report = self.get_report(cov) assert "NoSource" not in report @@ -919,7 +1024,9 @@ def test_empty_files(self) -> None: assert "| 0 | 0 | 0 | 0 | 100% |" in report def test_markdown_with_missing(self) -> None: - self.make_file("mymissing.py", """\ + self.make_file( + "mymissing.py", + """\ def missing(x, y): if x: print("x") @@ -934,10 +1041,11 @@ def missing(x, y): pass return x missing(0, 1) - """) + """, + ) cov = coverage.Coverage(source=["."]) self.start_import_stop(cov, "mymissing") - assert self.stdout() == 'y\nz\n' + assert self.stdout() == "y\nz\n" report = self.get_report(cov, squeeze=False, output_format="markdown", show_missing=True) # | Name | Stmts | Miss | Cover | Missing | @@ -954,14 +1062,17 @@ def missing(x, y): assert self.get_report(cov, output_format="total", precision=4) == "78.5714\n" def test_bug_1524(self) -> None: - self.make_file("bug1524.py", """\ + self.make_file( + "bug1524.py", + """\ class Mine: @property def thing(self) -> int: return 17 print(Mine().thing) - """) + """, + ) cov = coverage.Coverage() self.start_import_stop(cov, "bug1524") assert self.stdout() == "17\n" @@ -975,7 +1086,9 @@ class ReportingReturnValueTest(CoverageTest): def run_coverage(self) -> Coverage: """Run coverage on doit.py and return the coverage object.""" - self.make_file("doit.py", """\ + self.make_file( + "doit.py", + """\ a = 1 b = 2 c = 3 @@ -983,7 +1096,8 @@ def run_coverage(self) -> Coverage: if a > 10: f = 6 g = 7 - """) + """, + ) cov = coverage.Coverage() self.start_import_stop(cov, "doit") @@ -1055,7 +1169,7 @@ def test_test_data(self) -> None: # TOTAL 586 386 34% lines = report.splitlines()[2:-2] assert len(lines) == 3 - nums = [list(map(int, l.replace('%', '').split()[1:])) for l in lines] + nums = [list(map(int, l.replace("%", "").split()[1:])) for l in lines] # [ # [339, 155, 54], # [ 13, 3, 77], @@ -1068,14 +1182,14 @@ def test_test_data(self) -> None: def test_defaults(self) -> None: """Run the report with no configuration options.""" report = self.get_summary_text() - assert 'Missing' not in report - assert 'Branch' not in report + assert "Missing" not in report + assert "Branch" not in report def test_print_missing(self) -> None: """Run the report printing the missing lines.""" - report = self.get_summary_text(('report:show_missing', True)) - assert 'Missing' in report - assert 'Branch' not in report + report = self.get_summary_text(("report:show_missing", True)) + assert "Missing" in report + assert "Branch" not in report def assert_ordering(self, text: str, *words: str) -> None: """Assert that the `words` appear in order in `text`.""" @@ -1091,42 +1205,42 @@ def test_default_sort_report(self) -> None: def test_sort_report_by_name(self) -> None: # Sort the text report explicitly by the Name column. - report = self.get_summary_text(('report:sort', 'Name')) + report = self.get_summary_text(("report:sort", "Name")) self.assert_ordering(report, "file1.py", "file2.py", "file10.py") def test_sort_report_by_stmts(self) -> None: # Sort the text report by the Stmts column. - report = self.get_summary_text(('report:sort', 'Stmts')) + report = self.get_summary_text(("report:sort", "Stmts")) self.assert_ordering(report, "file2.py", "file10.py", "file1.py") def test_sort_report_by_missing(self) -> None: # Sort the text report by the Missing column. 
- report = self.get_summary_text(('report:sort', 'Miss')) + report = self.get_summary_text(("report:sort", "Miss")) self.assert_ordering(report, "file2.py", "file1.py", "file10.py") def test_sort_report_by_cover(self) -> None: # Sort the text report by the Cover column. - report = self.get_summary_text(('report:sort', 'Cover')) + report = self.get_summary_text(("report:sort", "Cover")) self.assert_ordering(report, "file10.py", "file1.py", "file2.py") def test_sort_report_by_cover_plus(self) -> None: # Sort the text report by the Cover column, including the explicit + sign. - report = self.get_summary_text(('report:sort', '+Cover')) + report = self.get_summary_text(("report:sort", "+Cover")) self.assert_ordering(report, "file10.py", "file1.py", "file2.py") def test_sort_report_by_cover_reversed(self) -> None: # Sort the text report by the Cover column reversed. - report = self.get_summary_text(('report:sort', '-Cover')) + report = self.get_summary_text(("report:sort", "-Cover")) self.assert_ordering(report, "file2.py", "file1.py", "file10.py") def test_sort_report_by_invalid_option(self) -> None: # Sort the text report by a nonsense column. msg = "Invalid sorting option: 'Xyzzy'" with pytest.raises(ConfigError, match=msg): - self.get_summary_text(('report:sort', 'Xyzzy')) + self.get_summary_text(("report:sort", "Xyzzy")) def test_report_with_invalid_format(self) -> None: # Ask for an invalid format. msg = "Unknown report format choice: 'xyzzy'" with pytest.raises(ConfigError, match=msg): - self.get_summary_text(('report:format', 'xyzzy')) + self.get_summary_text(("report:format", "xyzzy")) diff --git a/tests/test_report_common.py b/tests/test_report_common.py index 4583fe17d..e5eb428f5 100644 --- a/tests/test_report_common.py +++ b/tests/test_report_common.py @@ -48,27 +48,32 @@ def make_files(self, data: str, settings: bool = False) -> None: ) if settings: - self.make_file(".coveragerc", """\ + self.make_file( + ".coveragerc", + """\ [paths] source = src ver1 ver2 - """) + """, + ) def test_map_paths_during_line_report_without_setting(self) -> None: self.make_files(data="line") cov = coverage.Coverage() cov.load() cov.report(show_missing=True) - expected = textwrap.dedent(os_sep("""\ + expected = textwrap.dedent( + os_sep("""\ Name Stmts Miss Cover Missing ----------------------------------------------- ver1/program.py 6 2 67% 4, 6 ver2/program.py 6 2 67% 2, 6 ----------------------------------------------- TOTAL 12 4 67% - """)) + """) + ) assert expected == self.stdout() def test_map_paths_during_line_report(self) -> None: @@ -76,13 +81,15 @@ def test_map_paths_during_line_report(self) -> None: cov = coverage.Coverage() cov.load() cov.report(show_missing=True) - expected = textwrap.dedent(os_sep("""\ + expected = textwrap.dedent( + os_sep("""\ Name Stmts Miss Cover Missing ---------------------------------------------- src/program.py 6 1 83% 6 ---------------------------------------------- TOTAL 6 1 83% - """)) + """) + ) assert expected == self.stdout() def test_map_paths_during_branch_report_without_setting(self) -> None: @@ -90,14 +97,16 @@ def test_map_paths_during_branch_report_without_setting(self) -> None: cov = coverage.Coverage(branch=True) cov.load() cov.report(show_missing=True) - expected = textwrap.dedent(os_sep("""\ + expected = textwrap.dedent( + os_sep("""\ Name Stmts Miss Branch BrPart Cover Missing ------------------------------------------------------------- ver1/program.py 6 2 6 3 58% 1->3, 4, 6 ver2/program.py 6 2 6 3 58% 2, 3->5, 6 
------------------------------------------------------------- TOTAL 12 4 12 6 58% - """)) + """) + ) assert expected == self.stdout() def test_map_paths_during_branch_report(self) -> None: @@ -105,13 +114,15 @@ def test_map_paths_during_branch_report(self) -> None: cov = coverage.Coverage(branch=True) cov.load() cov.report(show_missing=True) - expected = textwrap.dedent(os_sep("""\ + expected = textwrap.dedent( + os_sep("""\ Name Stmts Miss Branch BrPart Cover Missing ------------------------------------------------------------ src/program.py 6 1 6 1 83% 6 ------------------------------------------------------------ TOTAL 6 1 6 1 83% - """)) + """) + ) assert expected == self.stdout() def test_map_paths_during_annotate(self) -> None: @@ -144,8 +155,10 @@ def test_map_paths_during_json_report(self) -> None: cov = coverage.Coverage() cov.load() cov.json_report() + def os_sepj(s: str) -> str: return os_sep(s).replace("\\", r"\\") + contains("coverage.json", os_sepj("src/program.py")) doesnt_contain("coverage.json", os_sepj("ver1/program.py"), os_sepj("ver2/program.py")) @@ -183,17 +196,23 @@ class ReportWithJinjaTest(CoverageTest): def make_files(self) -> None: """Create test files: two Jinja templates, and data from rendering them.""" # A Jinja2 file that is syntactically acceptable Python (though it wont run). - self.make_file("good.j2", """\ + self.make_file( + "good.j2", + """\ {{ data }} line2 line3 - """) + """, + ) # A Jinja2 file that is a Python syntax error. - self.make_file("bad.j2", """\ + self.make_file( + "bad.j2", + """\ This is data: {{ data }}. line 2 line 3 - """) + """, + ) self.make_data_file( lines={ abs_file("good.j2"): [1, 3, 5, 7, 9], @@ -220,7 +239,9 @@ def test_html(self) -> None: cov = coverage.Coverage() cov.load() cov.html_report() - contains("htmlcov/index.html", """\ + contains( + "htmlcov/index.html", + """\ good.j2 @@ -239,7 +260,8 @@ def test_xml(self) -> None: cov.load() cov.xml_report() contains("coverage.xml", 'filename="good.j2"') - contains("coverage.xml", + contains( + "coverage.xml", '', '', '', @@ -252,11 +274,12 @@ def test_json(self) -> None: cov = coverage.Coverage() cov.load() cov.json_report() - contains("coverage.json", + contains( + "coverage.json", # Notice the .json report claims lines in good.j2 executed that # don't even exist in good.j2... 
- '"files": {"good.j2": {"executed_lines": [1, 3, 5, 7, 9], ' + - '"summary": {"covered_lines": 2, "num_statements": 3', + '"files": {"good.j2": {"executed_lines": [1, 3, 5, 7, 9], ' + + '"summary": {"covered_lines": 2, "num_statements": 3', ) doesnt_contain("coverage.json", "bad.j2") diff --git a/tests/test_results.py b/tests/test_results.py index fefe46baf..57930a49d 100644 --- a/tests/test_results.py +++ b/tests/test_results.py @@ -51,25 +51,31 @@ def test_sum(self) -> None: assert n3.n_missing == 28 assert math.isclose(n3.pc_covered, 86.666666666) - @pytest.mark.parametrize("kwargs, res", [ - (dict(n_files=1, n_statements=1000, n_missing=0), "100"), - (dict(n_files=1, n_statements=1000, n_missing=1), "99"), - (dict(n_files=1, n_statements=1000, n_missing=999), "1"), - (dict(n_files=1, n_statements=1000, n_missing=1000), "0"), - (dict(precision=1, n_files=1, n_statements=10000, n_missing=0), "100.0"), - (dict(precision=1, n_files=1, n_statements=10000, n_missing=1), "99.9"), - (dict(precision=1, n_files=1, n_statements=10000, n_missing=9999), "0.1"), - (dict(precision=1, n_files=1, n_statements=10000, n_missing=10000), "0.0"), - ]) + @pytest.mark.parametrize( + "kwargs, res", + [ + (dict(n_files=1, n_statements=1000, n_missing=0), "100"), + (dict(n_files=1, n_statements=1000, n_missing=1), "99"), + (dict(n_files=1, n_statements=1000, n_missing=999), "1"), + (dict(n_files=1, n_statements=1000, n_missing=1000), "0"), + (dict(precision=1, n_files=1, n_statements=10000, n_missing=0), "100.0"), + (dict(precision=1, n_files=1, n_statements=10000, n_missing=1), "99.9"), + (dict(precision=1, n_files=1, n_statements=10000, n_missing=9999), "0.1"), + (dict(precision=1, n_files=1, n_statements=10000, n_missing=10000), "0.0"), + ], + ) def test_pc_covered_str(self, kwargs: dict[str, int], res: str) -> None: assert Numbers(**kwargs).pc_covered_str == res - @pytest.mark.parametrize("prec, pc, res", [ - (0, 47.87, "48"), - (1, 47.87, "47.9"), - (0, 99.995, "99"), - (2, 99.99995, "99.99"), - ]) + @pytest.mark.parametrize( + "prec, pc, res", + [ + (0, 47.87, "48"), + (1, 47.87, "47.9"), + (0, 99.995, "99"), + (2, 99.99995, "99.99"), + ], + ) def test_display_covered(self, prec: int, pc: float, res: str) -> None: assert display_covered(pc, prec) == res @@ -78,37 +84,44 @@ def test_covered_ratio(self) -> None: assert n.ratio_covered == (153, 200) n = Numbers( - n_files=1, n_statements=200, n_missing=47, - n_branches=10, n_missing_branches=3, n_partial_branches=1000, + n_files=1, + n_statements=200, + n_missing=47, + n_branches=10, + n_missing_branches=3, + n_partial_branches=1000, ) assert n.ratio_covered == (160, 210) -@pytest.mark.parametrize("total, fail_under, precision, result", [ - # fail_under==0 means anything is fine! - (0, 0, 0, False), - (0.001, 0, 0, False), - # very small fail_under is possible to fail. - (0.001, 0.01, 0, True), - # Rounding should work properly. - (42.1, 42, 0, False), - (42.1, 43, 0, True), - (42.857, 42, 0, False), - (42.857, 43, 0, False), - (42.857, 44, 0, True), - (42.857, 42.856, 3, False), - (42.857, 42.858, 3, True), - # If you don't specify precision, your fail-under is rounded. - (42.857, 42.856, 0, False), - # Values near 100 should only be treated as 100 if they are 100. 
- (99.8, 100, 0, True), - (100.0, 100, 0, False), - (99.8, 99.7, 1, False), - (99.88, 99.90, 2, True), - (99.999, 100, 1, True), - (99.999, 100, 2, True), - (99.999, 100, 3, True), -]) +@pytest.mark.parametrize( + "total, fail_under, precision, result", + [ + # fail_under==0 means anything is fine! + (0, 0, 0, False), + (0.001, 0, 0, False), + # very small fail_under is possible to fail. + (0.001, 0.01, 0, True), + # Rounding should work properly. + (42.1, 42, 0, False), + (42.1, 43, 0, True), + (42.857, 42, 0, False), + (42.857, 43, 0, False), + (42.857, 44, 0, True), + (42.857, 42.856, 3, False), + (42.857, 42.858, 3, True), + # If you don't specify precision, your fail-under is rounded. + (42.857, 42.856, 0, False), + # Values near 100 should only be treated as 100 if they are 100. + (99.8, 100, 0, True), + (100.0, 100, 0, False), + (99.8, 99.7, 1, False), + (99.88, 99.90, 2, True), + (99.999, 100, 1, True), + (99.999, 100, 2, True), + (99.999, 100, 3, True), + ], +) def test_should_fail_under(total: float, fail_under: float, precision: int, result: bool) -> None: assert should_fail_under(float(total), float(fail_under), precision) == result @@ -118,15 +131,26 @@ def test_should_fail_under_invalid_value() -> None: should_fail_under(100.0, 101, 0) -@pytest.mark.parametrize("statements, lines, result", [ - ({1,2,3,4,5,10,11,12,13,14}, {1,2,5,10,11,13,14}, "1-2, 5-11, 13-14"), - ([1,2,3,4,5,10,11,12,13,14,98,99], [1,2,5,10,11,13,14,99], "1-2, 5-11, 13-14, 99"), - ([1,2,3,4,98,99,100,101,102,103,104], [1,2,99,102,103,104], "1-2, 99, 102-104"), - ([17], [17], "17"), - ([90,91,92,93,94,95], [90,91,92,93,94,95], "90-95"), - ([1, 2, 3, 4, 5], [], ""), - ([1, 2, 3, 4, 5], [4], "4"), -]) +@pytest.mark.parametrize( + "statements, lines, result", + [ + ({1, 2, 3, 4, 5, 10, 11, 12, 13, 14}, {1, 2, 5, 10, 11, 13, 14}, "1-2, 5-11, 13-14"), + ( + [1, 2, 3, 4, 5, 10, 11, 12, 13, 14, 98, 99], + [1, 2, 5, 10, 11, 13, 14, 99], + "1-2, 5-11, 13-14, 99", + ), + ( + [1, 2, 3, 4, 98, 99, 100, 101, 102, 103, 104], + [1, 2, 99, 102, 103, 104], + "1-2, 99, 102-104", + ), + ([17], [17], "17"), + ([90, 91, 92, 93, 94, 95], [90, 91, 92, 93, 94, 95], "90-95"), + ([1, 2, 3, 4, 5], [], ""), + ([1, 2, 3, 4, 5], [4], "4"), + ], +) def test_format_lines( statements: Iterable[TLineNo], lines: Iterable[TLineNo], @@ -135,26 +159,29 @@ def test_format_lines( assert format_lines(statements, lines) == result -@pytest.mark.parametrize("statements, lines, arcs, result", [ - ( - {1,2,3,4,5,10,11,12,13,14}, - {1,2,5,10,11,13,14}, - (), - "1-2, 5-11, 13-14", - ), - ( - [1,2,3,4,5,10,11,12,13,14,98,99], - [1,2,5,10,11,13,14,99], - [(3, [4]), (5, [10, 11]), (98, [100, -1])], - "1-2, 3->4, 5-11, 13-14, 98->100, 98->exit, 99", - ), - ( - [1,2,3,4,98,99,100,101,102,103,104], - [1,2,99,102,103,104], - [(3, [4]), (104, [-1])], - "1-2, 3->4, 99, 102-104", - ), -]) +@pytest.mark.parametrize( + "statements, lines, arcs, result", + [ + ( + {1, 2, 3, 4, 5, 10, 11, 12, 13, 14}, + {1, 2, 5, 10, 11, 13, 14}, + (), + "1-2, 5-11, 13-14", + ), + ( + [1, 2, 3, 4, 5, 10, 11, 12, 13, 14, 98, 99], + [1, 2, 5, 10, 11, 13, 14, 99], + [(3, [4]), (5, [10, 11]), (98, [100, -1])], + "1-2, 3->4, 5-11, 13-14, 98->100, 98->exit, 99", + ), + ( + [1, 2, 3, 4, 98, 99, 100, 101, 102, 103, 104], + [1, 2, 99, 102, 103, 104], + [(3, [4]), (104, [-1])], + "1-2, 3->4, 99, 102-104", + ), + ], +) def test_format_lines_with_arcs( statements: Iterable[TLineNo], lines: Iterable[TLineNo], diff --git a/tests/test_setup.py b/tests/test_setup.py index c0cfa7eb7..5082a0905 100644 
--- a/tests/test_setup.py +++ b/tests/test_setup.py @@ -25,7 +25,7 @@ class SetupPyTest(CoverageTest): def setUp(self) -> None: super().setUp() # Force the most restrictive interpretation. - self.set_environ('LC_ALL', 'C') + self.set_environ("LC_ALL", "C") def test_metadata(self) -> None: status, output = self.run_command_status( @@ -45,16 +45,16 @@ def test_metadata(self) -> None: def test_more_metadata(self) -> None: # Let's be sure we pick up our own setup.py # CoverageTest restores the original sys.path for us. - sys.path.insert(0, '') + sys.path.insert(0, "") from setup import setup_args - classifiers = cast(list[str], setup_args['classifiers']) + classifiers = cast(list[str], setup_args["classifiers"]) assert len(classifiers) > 7 assert classifiers[-1].startswith("Development Status ::") assert "Programming Language :: Python :: %d" % sys.version_info[:1] in classifiers assert "Programming Language :: Python :: %d.%d" % sys.version_info[:2] in classifiers - long_description = cast(str, setup_args['long_description']).splitlines() + long_description = cast(str, setup_args["long_description"]).splitlines() assert len(long_description) > 7 assert long_description[0].strip() != "" assert long_description[-1].strip() != "" diff --git a/tests/test_sqlitedb.py b/tests/test_sqlitedb.py index 044c801db..617eb4568 100644 --- a/tests/test_sqlitedb.py +++ b/tests/test_sqlitedb.py @@ -22,6 +22,7 @@ insert into name (first, last) values ("pablo", "picasso"); """ + class SqliteDbTest(CoverageTest): """Tests of tricky parts of SqliteDb.""" @@ -31,7 +32,7 @@ def test_error_reporting(self) -> None: with pytest.raises(DataError, match=msg): with db.execute("select foo from bar"): # Entering the context manager raises the error, this line doesn't run: - pass # pragma: not covered + pass # pragma: not covered def test_retry_execute(self) -> None: with SqliteDb("test.db", DebugControlString(options=["sql"])) as db: @@ -49,7 +50,7 @@ def test_retry_execute_failure(self) -> None: with pytest.raises(RuntimeError, match="Fake"): with db.execute("select first from name order by 1"): # Entering the context manager raises the error, this line doesn't run: - pass # pragma: not covered + pass # pragma: not covered def test_retry_executemany_void(self) -> None: with SqliteDb("test.db", DebugControlString(options=["sql"])) as db: @@ -76,14 +77,16 @@ def test_retry_executemany_void_failure(self) -> None: def test_open_fails_on_bad_db(self) -> None: self.make_file("bad.db", "boogers") + def fake_failing_open(filename: str, mode: str) -> NoReturn: assert (filename, mode) == ("bad.db", "rb") raise RuntimeError("No you can't!") + with mock.patch.object(coverage.sqlitedb, "open", fake_failing_open): msg = "Couldn't use data file 'bad.db': file is not a database" with pytest.raises(DataError, match=msg): with SqliteDb("bad.db", DebugControlString(options=["sql"])): - pass # pragma: not covered + pass # pragma: not covered def test_execute_void_can_allow_failure(self) -> None: with SqliteDb("fail.db", DebugControlString(options=["sql"])) as db: diff --git a/tests/test_templite.py b/tests/test_templite.py index 3484f71df..6175c10f2 100644 --- a/tests/test_templite.py +++ b/tests/test_templite.py @@ -49,7 +49,7 @@ def assertSynErr(self, msg: str) -> ContextManager[None]: """ pat = "^" + re.escape(msg) + "$" - return pytest.raises(TempliteSyntaxError, match=pat) # type: ignore + return pytest.raises(TempliteSyntaxError, match=pat) # type: ignore def test_passthrough(self) -> None: # Strings without variables are passed through 
unchanged. @@ -58,7 +58,7 @@ def test_passthrough(self) -> None: def test_variables(self) -> None: # Variables use {{var}} syntax. - self.try_render("Hello, {{name}}!", {'name':'Ned'}, "Hello, Ned!") + self.try_render("Hello, {{name}}!", {"name": "Ned"}, "Hello, Ned!") def test_undefined_variables(self) -> None: # Using undefined names is an error. @@ -68,9 +68,9 @@ def test_undefined_variables(self) -> None: def test_pipes(self) -> None: # Variables can be filtered with pipes. data = { - 'name': 'Ned', - 'upper': lambda x: x.upper(), - 'second': lambda x: x[1], + "name": "Ned", + "upper": lambda x: x.upper(), + "second": lambda x: x[1], } self.try_render("Hello, {{name|upper}}!", data, "Hello, NED!") @@ -80,13 +80,13 @@ def test_pipes(self) -> None: def test_reusability(self) -> None: # A single Templite can be used more than once with different data. globs = { - 'upper': lambda x: x.upper(), - 'punct': '!', + "upper": lambda x: x.upper(), + "punct": "!", } template = Templite("This is {{name|upper}}{{punct}}", globs) - assert template.render({'name':'Ned'}) == "This is NED!" - assert template.render({'name':'Ben'}) == "This is BEN!" + assert template.render({"name": "Ned"}) == "This is NED!" + assert template.render({"name": "Ben"}) == "This is BEN!" def test_attribute(self) -> None: # Variables' attributes can be accessed with dots. @@ -100,25 +100,28 @@ def test_member_function(self) -> None: # Variables' member functions can be used, as long as they are nullary. class WithMemberFns(SimpleNamespace): """A class to try out member function access.""" + def ditto(self) -> str: """Return twice the .txt attribute.""" - return self.txt + self.txt # type: ignore + return self.txt + self.txt # type: ignore + obj = WithMemberFns(txt="Once") self.try_render("{{obj.ditto}}", locals(), "OnceOnce") def test_item_access(self) -> None: # Variables' items can be used. - d = {'a':17, 'b':23} + d = {"a": 17, "b": 23} self.try_render("{{d.a}} < {{d.b}}", locals(), "17 < 23") def test_loops(self) -> None: # Loops work like in Django. - nums = [1,2,3,4] + nums = [1, 2, 3, 4] self.try_render( "Look: {% for n in nums %}{{n}}, {% endfor %}done.", locals(), "Look: 1, 2, 3, 4, done.", ) + # Loop iterables can be filtered. 
def rev(l: list[int]) -> list[int]: """Return the reverse of `l`.""" @@ -135,22 +138,21 @@ def rev(l: list[int]) -> list[int]: def test_empty_loops(self) -> None: self.try_render( "Empty: {% for n in nums %}{{n}}, {% endfor %}done.", - {'nums':[]}, + {"nums": []}, "Empty: done.", ) def test_multiline_loops(self) -> None: self.try_render( "Look: \n{% for n in nums %}\n{{n}}, \n{% endfor %}done.", - {'nums':[1,2,3]}, + {"nums": [1, 2, 3]}, "Look: \n\n1, \n\n2, \n\n3, \ndone.", ) def test_multiple_loops(self) -> None: self.try_render( - "{% for n in nums %}{{n}}{% endfor %} and " + - "{% for n in nums %}{{n}}{% endfor %}", - {'nums': [1,2,3]}, + "{% for n in nums %}{{n}}{% endfor %} and " + "{% for n in nums %}{{n}}{% endfor %}", + {"nums": [1, 2, 3]}, "123 and 123", ) @@ -158,138 +160,139 @@ def test_comments(self) -> None: # Single-line comments work: self.try_render( "Hello, {# Name goes here: #}{{name}}!", - {'name':'Ned'}, "Hello, Ned!", + {"name": "Ned"}, + "Hello, Ned!", ) # and so do multi-line comments: self.try_render( "Hello, {# Name\ngoes\nhere: #}{{name}}!", - {'name':'Ned'}, "Hello, Ned!", + {"name": "Ned"}, + "Hello, Ned!", ) def test_if(self) -> None: self.try_render( "Hi, {% if ned %}NED{% endif %}{% if ben %}BEN{% endif %}!", - {'ned': 1, 'ben': 0}, + {"ned": 1, "ben": 0}, "Hi, NED!", ) self.try_render( "Hi, {% if ned %}NED{% endif %}{% if ben %}BEN{% endif %}!", - {'ned': 0, 'ben': 1}, + {"ned": 0, "ben": 1}, "Hi, BEN!", ) self.try_render( "Hi, {% if ned %}NED{% if ben %}BEN{% endif %}{% endif %}!", - {'ned': 0, 'ben': 0}, + {"ned": 0, "ben": 0}, "Hi, !", ) self.try_render( "Hi, {% if ned %}NED{% if ben %}BEN{% endif %}{% endif %}!", - {'ned': 1, 'ben': 0}, + {"ned": 1, "ben": 0}, "Hi, NED!", ) self.try_render( "Hi, {% if ned %}NED{% if ben %}BEN{% endif %}{% endif %}!", - {'ned': 1, 'ben': 1}, + {"ned": 1, "ben": 1}, "Hi, NEDBEN!", ) def test_complex_if(self) -> None: class Complex(SimpleNamespace): """A class to try out complex data access.""" - def getit(self): # type: ignore + + def getit(self): # type: ignore """Return it.""" return self.it - obj = Complex(it={'x':"Hello", 'y': 0}) + + obj = Complex(it={"x": "Hello", "y": 0}) self.try_render( - "@" + - "{% if obj.getit.x %}X{% endif %}" + - "{% if obj.getit.y %}Y{% endif %}" + - "{% if obj.getit.y|str %}S{% endif %}" + - "!", - { 'obj': obj, 'str': str }, + "@" + + "{% if obj.getit.x %}X{% endif %}" + + "{% if obj.getit.y %}Y{% endif %}" + + "{% if obj.getit.y|str %}S{% endif %}" + + "!", + {"obj": obj, "str": str}, "@XS!", ) def test_loop_if(self) -> None: self.try_render( "@{% for n in nums %}{% if n %}Z{% endif %}{{n}}{% endfor %}!", - {'nums': [0,1,2]}, + {"nums": [0, 1, 2]}, "@0Z1Z2!", ) self.try_render( "X{%if nums%}@{% for n in nums %}{{n}}{% endfor %}{%endif%}!", - {'nums': [0,1,2]}, + {"nums": [0, 1, 2]}, "X@012!", ) self.try_render( "X{%if nums%}@{% for n in nums %}{{n}}{% endfor %}{%endif%}!", - {'nums': []}, + {"nums": []}, "X!", ) def test_nested_loops(self) -> None: self.try_render( - "@" + - "{% for n in nums %}" + - "{% for a in abc %}{{a}}{{n}}{% endfor %}" + - "{% endfor %}" + - "!", - {'nums': [0,1,2], 'abc': ['a', 'b', 'c']}, + "@" + + "{% for n in nums %}" + + "{% for a in abc %}{{a}}{{n}}{% endfor %}" + + "{% endfor %}" + + "!", + {"nums": [0, 1, 2], "abc": ["a", "b", "c"]}, "@a0b0c0a1b1c1a2b2c2!", ) def test_whitespace_handling(self) -> None: self.try_render( - "@{% for n in nums %}\n" + - " {% for a in abc %}{{a}}{{n}}{% endfor %}\n" + - "{% endfor %}!\n", - {'nums': [0, 1, 2], 'abc': ['a', 'b', 
'c']}, + "@{% for n in nums %}\n" + + " {% for a in abc %}{{a}}{{n}}{% endfor %}\n" + + "{% endfor %}!\n", + {"nums": [0, 1, 2], "abc": ["a", "b", "c"]}, "@\n a0b0c0\n\n a1b1c1\n\n a2b2c2\n!\n", ) self.try_render( - "@{% for n in nums -%}\n" + - " {% for a in abc -%}\n" + - " {# this disappears completely -#}\n" + - " {{a-}}\n" + - " {{n -}}\n" + - " {{n -}}\n" + - " {% endfor %}\n" + - "{% endfor %}!\n", - {'nums': [0, 1, 2], 'abc': ['a', 'b', 'c']}, + "@{% for n in nums -%}\n" + + " {% for a in abc -%}\n" + + " {# this disappears completely -#}\n" + + " {{a-}}\n" + + " {{n -}}\n" + + " {{n -}}\n" + + " {% endfor %}\n" + + "{% endfor %}!\n", + {"nums": [0, 1, 2], "abc": ["a", "b", "c"]}, "@a00b00c00\na11b11c11\na22b22c22\n!\n", ) self.try_render( - "@{% for n in nums -%}\n" + - " {{n -}}\n" + - " x\n" + - "{% endfor %}!\n", - {'nums': [0, 1, 2]}, + "@{% for n in nums -%}\n" + " {{n -}}\n" + " x\n" + "{% endfor %}!\n", + {"nums": [0, 1, 2]}, "@0x\n1x\n2x\n!\n", ) self.try_render(" hello ", {}, " hello ") def test_eat_whitespace(self) -> None: self.try_render( - "Hey!\n" + - "{% joined %}\n" + - "@{% for n in nums %}\n" + - " {% for a in abc %}\n" + - " {# this disappears completely #}\n" + - " X\n" + - " Y\n" + - " {{a}}\n" + - " {{n }}\n" + - " {% endfor %}\n" + - "{% endfor %}!\n" + - "{% endjoined %}\n", - {'nums': [0, 1, 2], 'abc': ['a', 'b', 'c']}, + "Hey!\n" + + "{% joined %}\n" + + "@{% for n in nums %}\n" + + " {% for a in abc %}\n" + + " {# this disappears completely #}\n" + + " X\n" + + " Y\n" + + " {{a}}\n" + + " {{n }}\n" + + " {% endfor %}\n" + + "{% endfor %}!\n" + + "{% endjoined %}\n", + {"nums": [0, 1, 2], "abc": ["a", "b", "c"]}, "Hey!\n@XYa0XYb0XYc0XYa1XYb1XYc1XYa2XYb2XYc2!\n", ) def test_non_ascii(self) -> None: self.try_render( "{{where}} ollĮÉĨ", - { 'where': 'ĮÉšĮÉĨʇ' }, + {"where": "ĮÉšĮÉĨʇ"}, "ĮÉšĮÉĨʇ ollĮÉĨ", ) @@ -298,7 +301,9 @@ def test_exception_during_evaluation(self) -> None: regex = "^Couldn't evaluate None.bar$" with pytest.raises(TempliteValueError, match=regex): self.try_render( - "Hey {{foo.bar.baz}} there", {'foo': None}, "Hey ??? there", + "Hey {{foo.bar.baz}} there", + {"foo": None}, + "Hey ??? 
there", ) def test_bad_names(self) -> None: diff --git a/tests/test_testing.py b/tests/test_testing.py index c026d536d..5f3a4c771 100644 --- a/tests/test_testing.py +++ b/tests/test_testing.py @@ -21,10 +21,15 @@ from tests.coveragetest import CoverageTest from tests.helpers import ( - CheckUniqueFilenames, FailingProxy, + CheckUniqueFilenames, + FailingProxy, all_our_source_files, - arcz_to_arcs, assert_count_equal, assert_coverage_warnings, - re_lines, re_lines_text, re_line, + arcz_to_arcs, + assert_count_equal, + assert_coverage_warnings, + re_lines, + re_lines_text, + re_line, ) @@ -32,9 +37,9 @@ def test_assert_count_equal() -> None: assert_count_equal(set(), set()) assert_count_equal({"a": 1, "b": 2}, ["b", "a"]) with pytest.raises(AssertionError): - assert_count_equal({1,2,3}, set()) + assert_count_equal({1, 2, 3}, set()) with pytest.raises(AssertionError): - assert_count_equal({1,2,3}, {4,5,6}) + assert_count_equal({1, 2, 3}, {4, 5, 6}) class CoverageTestTest(CoverageTest): @@ -60,14 +65,14 @@ def test_file_count(self) -> None: self.assert_file_count("afile.*", 1) self.assert_file_count("*.q", 0) msg = re.escape( - "There should be 13 files matching 'a*.txt', but there are these: " + - "['abcde.txt', 'afile.txt', 'axczz.txt']", + "There should be 13 files matching 'a*.txt', but there are these: " + + "['abcde.txt', 'afile.txt', 'axczz.txt']", ) with pytest.raises(AssertionError, match=msg): self.assert_file_count("a*.txt", 13) msg = re.escape( - "There should be 12 files matching '*c*.txt', but there are these: " + - "['abcde.txt', 'axczz.txt']", + "There should be 12 files matching '*c*.txt', but there are these: " + + "['abcde.txt', 'axczz.txt']", ) with pytest.raises(AssertionError, match=msg): self.assert_file_count("*c*.txt", 12) @@ -167,17 +172,20 @@ def test_assert_no_warnings(self) -> None: def test_sub_python_is_this_python(self) -> None: # Try it with a Python command. - self.set_environ('COV_FOOBAR', 'XYZZY') - self.make_file("showme.py", """\ + self.set_environ("COV_FOOBAR", "XYZZY") + self.make_file( + "showme.py", + """\ import os, sys print(sys.executable) print(os.__file__) print(os.environ['COV_FOOBAR']) - """) + """, + ) out_lines = self.run_command("python showme.py").splitlines() assert actual_path(out_lines[0]) == actual_path(sys.executable) assert out_lines[1] == os.__file__ - assert out_lines[2] == 'XYZZY' + assert out_lines[2] == "XYZZY" # Try it with a "coverage debug sys" command. out = self.run_command("coverage debug sys") @@ -193,11 +201,14 @@ def test_sub_python_is_this_python(self) -> None: def test_run_command_stdout_stderr(self) -> None: # run_command should give us both stdout and stderr. 
- self.make_file("outputs.py", """\ + self.make_file( + "outputs.py", + """\ import sys sys.stderr.write("StdErr\\n") print("StdOut") - """) + """, + ) out = self.run_command("python outputs.py") assert "StdOut\n" in out assert "StdErr\n" in out @@ -219,6 +230,7 @@ class CheckUniqueFilenamesTest(CoverageTest): class Stub: """A stand-in for the class we're checking.""" + def __init__(self, x: int) -> None: self.x = x @@ -296,34 +308,46 @@ class ReLinesTest(CoverageTest): run_in_temp_dir = False - @pytest.mark.parametrize("pat, text, result", [ - ("line", "line1\nline2\nline3\n", "line1\nline2\nline3\n"), - ("[13]", "line1\nline2\nline3\n", "line1\nline3\n"), - ("X", "line1\nline2\nline3\n", ""), - ]) + @pytest.mark.parametrize( + "pat, text, result", + [ + ("line", "line1\nline2\nline3\n", "line1\nline2\nline3\n"), + ("[13]", "line1\nline2\nline3\n", "line1\nline3\n"), + ("X", "line1\nline2\nline3\n", ""), + ], + ) def test_re_lines(self, pat: str, text: str, result: str) -> None: assert re_lines_text(pat, text) == result assert re_lines(pat, text) == result.splitlines() - @pytest.mark.parametrize("pat, text, result", [ - ("line", "line1\nline2\nline3\n", ""), - ("[13]", "line1\nline2\nline3\n", "line2\n"), - ("X", "line1\nline2\nline3\n", "line1\nline2\nline3\n"), - ]) + @pytest.mark.parametrize( + "pat, text, result", + [ + ("line", "line1\nline2\nline3\n", ""), + ("[13]", "line1\nline2\nline3\n", "line2\n"), + ("X", "line1\nline2\nline3\n", "line1\nline2\nline3\n"), + ], + ) def test_re_lines_inverted(self, pat: str, text: str, result: str) -> None: assert re_lines_text(pat, text, match=False) == result assert re_lines(pat, text, match=False) == result.splitlines() - @pytest.mark.parametrize("pat, text, result", [ - ("2", "line1\nline2\nline3\n", "line2"), - ]) + @pytest.mark.parametrize( + "pat, text, result", + [ + ("2", "line1\nline2\nline3\n", "line2"), + ], + ) def test_re_line(self, pat: str, text: str, result: str) -> None: assert re_line(pat, text) == result - @pytest.mark.parametrize("pat, text", [ - ("line", "line1\nline2\nline3\n"), # too many matches - ("X", "line1\nline2\nline3\n"), # no matches - ]) + @pytest.mark.parametrize( + "pat, text", + [ + ("line", "line1\nline2\nline3\n"), # too many matches + ("X", "line1\nline2\nline3\n"), # no matches + ], + ) def test_re_line_bad(self, pat: str, text: str) -> None: with pytest.raises(AssertionError): re_line(pat, text) @@ -341,7 +365,7 @@ def _same_python_executable(e1: str, e2: str) -> bool: e2 = os.path.abspath(os.path.realpath(e2)) if os.path.dirname(e1) != os.path.dirname(e2): - return False # pragma: only failure + return False # pragma: only failure e1 = os.path.basename(e1) e2 = os.path.basename(e2) @@ -353,7 +377,7 @@ def _same_python_executable(e1: str, e2: str) -> bool: # Python2.3 and Python2.3: OK return True - return False # pragma: only failure + return False # pragma: only failure class ArczTest(CoverageTest): @@ -361,11 +385,14 @@ class ArczTest(CoverageTest): run_in_temp_dir = False - @pytest.mark.parametrize("arcz, arcs", [ - (".1 12 2.", [(-1, 1), (1, 2), (2, -1)]), - ("-11 12 2-5", [(-1, 1), (1, 2), (2, -5)]), - ("-QA CB IT Z-A", [(-26, 10), (12, 11), (18, 29), (35, -10)]), - ]) + @pytest.mark.parametrize( + "arcz, arcs", + [ + (".1 12 2.", [(-1, 1), (1, 2), (2, -1)]), + ("-11 12 2-5", [(-1, 1), (1, 2), (2, -5)]), + ("-QA CB IT Z-A", [(-26, 10), (12, 11), (18, 29), (35, -10)]), + ], + ) def test_arcz_to_arcs(self, arcz: str, arcs: list[TArc]) -> None: assert arcz_to_arcs(arcz) == arcs @@ -427,11 +454,12 @@ 
def test_regex_doesnt_match(self) -> None: def test_failing_proxy() -> None: class Arithmetic: """Sample class to test FailingProxy.""" + # pylint: disable=missing-function-docstring - def add(self, a, b): # type: ignore[no-untyped-def] + def add(self, a, b): # type: ignore[no-untyped-def] return a + b - def subtract(self, a, b): # type: ignore[no-untyped-def] + def subtract(self, a, b): # type: ignore[no-untyped-def] return a - b proxy = FailingProxy(Arithmetic(), "add", [RuntimeError("First"), RuntimeError("Second")]) diff --git a/tests/test_venv.py b/tests/test_venv.py index 134641d66..6b5f0d942 100644 --- a/tests/test_venv.py +++ b/tests/test_venv.py @@ -33,9 +33,9 @@ def run_in_venv(cmd: str) -> str: """ words = cmd.split() if env.WINDOWS: - words[0] = fr"venv\Scripts\{words[0]}.exe" + words[0] = rf"venv\Scripts\{words[0]}.exe" else: - words[0] = fr"venv/bin/{words[0]}" + words[0] = rf"venv/bin/{words[0]}" status, output = run_command(" ".join(words)) # Print the output so if it fails, we can tell what happened. print(output) @@ -56,107 +56,149 @@ def venv_world_fixture(tmp_path_factory: pytest.TempPathFactory) -> Path: run_command("python -m venv venv") # A third-party package that installs a few different packages. - make_file("third_pkg/third/__init__.py", """\ + make_file( + "third_pkg/third/__init__.py", + """\ import fourth def third(x): return 3 * x - """) + """, + ) # Use plugin2.py as third.plugin with open(os.path.join(os.path.dirname(__file__), "plugin2.py"), encoding="utf-8") as f: make_file("third_pkg/third/plugin.py", f.read()) # A render function for plugin2 to use for dynamic file names. - make_file("third_pkg/third/render.py", """\ + make_file( + "third_pkg/third/render.py", + """\ def render(filename, linenum): return "HTML: {}@{}".format(filename, linenum) - """) + """, + ) # Another package that third can use. - make_file("third_pkg/fourth/__init__.py", """\ + make_file( + "third_pkg/fourth/__init__.py", + """\ def fourth(x): return 4 * x - """) + """, + ) # Some namespace packages. - make_file("third_pkg/nspkg/fifth/__init__.py", """\ + make_file( + "third_pkg/nspkg/fifth/__init__.py", + """\ def fifth(x): return 5 * x - """) + """, + ) # The setup.py to install everything. - make_file("third_pkg/setup.py", """\ + make_file( + "third_pkg/setup.py", + """\ import setuptools setuptools.setup( name="third", packages=["third", "fourth", "nspkg.fifth"], ) - """) + """, + ) # Some namespace packages. - make_file("another_pkg/nspkg/sixth/__init__.py", """\ + make_file( + "another_pkg/nspkg/sixth/__init__.py", + """\ def sixth(x): return 6 * x - """) - make_file("another_pkg/setup.py", """\ + """, + ) + make_file( + "another_pkg/setup.py", + """\ import setuptools setuptools.setup( name="another", packages=["nspkg.sixth"], ) - """) + """, + ) # Bug888 code. 
- make_file("bug888/app/setup.py", """\ + make_file( + "bug888/app/setup.py", + """\ from setuptools import setup setup( name='testcov', packages=['testcov'], ) - """) + """, + ) # https://packaging.python.org/en/latest/guides/packaging-namespace-packages/#pkgutil-style-namespace-packages - make_file("bug888/app/testcov/__init__.py", """\ + make_file( + "bug888/app/testcov/__init__.py", + """\ __path__ = __import__('pkgutil').extend_path(__path__, __name__) - """) + """, + ) if env.PYVERSION < (3, 10): get_plugins = "entry_points['plugins']" else: get_plugins = "entry_points.select(group='plugins')" - make_file("bug888/app/testcov/main.py", f"""\ + make_file( + "bug888/app/testcov/main.py", + f"""\ import importlib.metadata entry_points = importlib.metadata.entry_points() for entry_point in {get_plugins}: entry_point.load()() - """) - make_file("bug888/plugin/setup.py", """\ + """, + ) + make_file( + "bug888/plugin/setup.py", + """\ from setuptools import setup setup( name='testcov-plugin', packages=['testcov'], entry_points={'plugins': ['testp = testcov.plugin:testp']}, ) - """) + """, + ) # https://packaging.python.org/en/latest/guides/packaging-namespace-packages/#pkgutil-style-namespace-packages - make_file("bug888/plugin/testcov/__init__.py", """\ + make_file( + "bug888/plugin/testcov/__init__.py", + """\ __path__ = __import__('pkgutil').extend_path(__path__, __name__) - """) - make_file("bug888/plugin/testcov/plugin.py", """\ + """, + ) + make_file( + "bug888/plugin/testcov/plugin.py", + """\ def testp(): print("Plugin here") - """) + """, + ) # Install everything. run_in_venv( - "python -m pip install " + - "./third_pkg " + - "-e ./another_pkg " + - "-e ./bug888/app -e ./bug888/plugin " + - COVERAGE_INSTALL_ARGS, + "python -m pip install " + + "./third_pkg " + + "-e ./another_pkg " + + "-e ./bug888/app -e ./bug888/plugin " + + COVERAGE_INSTALL_ARGS, ) shutil.rmtree("third_pkg") return venv_world -@pytest.fixture(params=[ - "coverage", - "python -m coverage", -], name="coverage_command") +@pytest.fixture( + params=[ + "coverage", + "python -m coverage", + ], + name="coverage_command", +) def coverage_command_fixture(request: pytest.FixtureRequest) -> str: """Parametrized fixture to use multiple forms of "coverage" command.""" return cast(str, request.param) @@ -172,7 +214,9 @@ class VirtualenvTest(CoverageTest): def in_venv_world_fixture(self, venv_world: Path) -> Iterator[None]: """For running tests inside venv_world, and cleaning up made files.""" with change_dir(venv_world): - self.make_file("myproduct.py", """\ + self.make_file( + "myproduct.py", + """\ import colorsys import third import nspkg.fifth @@ -181,9 +225,10 @@ def in_venv_world_fixture(self, venv_world: Path) -> Iterator[None]: print(nspkg.fifth.fifth(22)) print(nspkg.sixth.sixth(33)) print(sum(colorsys.rgb_to_hls(1, 0, 0))) - """) + """, + ) - self.del_environ("COVERAGE_TESTING") # To get realistic behavior + self.del_environ("COVERAGE_TESTING") # To get realistic behavior self.set_environ("COVERAGE_DEBUG_FILE", "debug_out.txt") self.set_environ("COVERAGE_DEBUG", "trace") @@ -198,25 +243,30 @@ def get_trace_output(self) -> str: with open("debug_out.txt", encoding="utf-8") as f: return f.read() - @pytest.mark.parametrize('install_source_in_venv', [True, False]) + @pytest.mark.parametrize("install_source_in_venv", [True, False]) def test_third_party_venv_isnt_measured( - self, coverage_command: str, install_source_in_venv: bool, + self, + coverage_command: str, + install_source_in_venv: bool, ) -> None: if 
install_source_in_venv: - make_file("setup.py", """\ + make_file( + "setup.py", + """\ import setuptools setuptools.setup( name="myproduct", py_modules = ["myproduct"], ) - """) + """, + ) try: run_in_venv("python -m pip install .") finally: shutil.rmtree("build", ignore_errors=True) shutil.rmtree("myproduct.egg-info", ignore_errors=True) # Ensure that coverage doesn't run the non-installed module. - os.remove('myproduct.py') + os.remove("myproduct.py") out = run_in_venv(coverage_command + " run --source=.,myproduct -m myproduct") else: out = run_in_venv(coverage_command + " run --source=. myproduct.py") @@ -251,8 +301,8 @@ def test_us_in_venv_isnt_measured(self, coverage_command: str) -> None: # --source refers to a module. debug_out = self.get_trace_output() assert re_lines( - r"^Not tracing .*\bexecfile.py': " + - "module 'coverage.execfile' falls outside the --source spec", + r"^Not tracing .*\bexecfile.py': " + + "module 'coverage.execfile' falls outside the --source spec", debug_out, ) assert re_lines( @@ -293,10 +343,13 @@ def test_venv_with_dynamic_plugin(self, coverage_command: str) -> None: # It happened because coverage imported the plugin, which imported # Django, and then the Django files were reported as traceable. self.make_file(".coveragerc", "[run]\nplugins=third.plugin\n") - self.make_file("myrender.py", """\ + self.make_file( + "myrender.py", + """\ import third.render print(third.render.render("hello.html", 1723)) - """) + """, + ) out = run_in_venv(coverage_command + " run --source=. myrender.py") # The output should not have this warning: # Already imported a file that will be measured: ...third/render.py (already-imported) @@ -315,8 +368,8 @@ def test_installed_namespace_packages(self, coverage_command: str) -> None: # --source refers to a file. debug_out = self.get_trace_output() assert re_lines( - r"^Not tracing .*\bexecfile.py': " + - "module 'coverage.execfile' falls outside the --source spec", + r"^Not tracing .*\bexecfile.py': " + + "module 'coverage.execfile' falls outside the --source spec", debug_out, ) assert re_lines( @@ -346,8 +399,7 @@ def test_installed_namespace_packages(self, coverage_command: str) -> None: def test_bug_888(self, coverage_command: str) -> None: out = run_in_venv( - coverage_command + - " run --source=bug888/app,bug888/plugin bug888/app/testcov/main.py", + coverage_command + " run --source=bug888/app,bug888/plugin bug888/app/testcov/main.py", ) # When the test fails, the output includes "Already imported a file that will be measured" assert out == "Plugin here\n" diff --git a/tests/test_version.py b/tests/test_version.py index 7e8935257..ebfea4a40 100644 --- a/tests/test_version.py +++ b/tests/test_version.py @@ -20,24 +20,24 @@ def test_version_info(self) -> None: # Make sure we didn't screw up the version_info tuple. 
assert isinstance(coverage.version_info, tuple) assert [type(d) for d in coverage.version_info] == [int, int, int, str, int] - assert coverage.version_info[3] in {'alpha', 'beta', 'candidate', 'final'} + assert coverage.version_info[3] in {"alpha", "beta", "candidate", "final"} def test_make_version(self) -> None: - assert _make_version(4, 0, 0, 'alpha') == "4.0.0a0" - assert _make_version(4, 0, 0, 'alpha', 1) == "4.0.0a1" - assert _make_version(4, 0, 0, 'final') == "4.0.0" + assert _make_version(4, 0, 0, "alpha") == "4.0.0a0" + assert _make_version(4, 0, 0, "alpha", 1) == "4.0.0a1" + assert _make_version(4, 0, 0, "final") == "4.0.0" assert _make_version(4, 1, 0) == "4.1.0" - assert _make_version(4, 1, 2, 'beta', 3) == "4.1.2b3" + assert _make_version(4, 1, 2, "beta", 3) == "4.1.2b3" assert _make_version(4, 1, 2) == "4.1.2" - assert _make_version(5, 10, 2, 'candidate', 7) == "5.10.2rc7" - assert _make_version(5, 10, 2, 'candidate', 7, 3) == "5.10.2rc7.dev3" + assert _make_version(5, 10, 2, "candidate", 7) == "5.10.2rc7" + assert _make_version(5, 10, 2, "candidate", 7, 3) == "5.10.2rc7.dev3" def test_make_url(self) -> None: expected = "https://coverage.readthedocs.io/en/4.1.2" - assert _make_url(4, 1, 2, 'final') == expected + assert _make_url(4, 1, 2, "final") == expected expected = "https://coverage.readthedocs.io/en/4.1.2b3" - assert _make_url(4, 1, 2, 'beta', 3) == expected + assert _make_url(4, 1, 2, "beta", 3) == expected expected = "https://coverage.readthedocs.io/en/4.1.2b3.dev17" - assert _make_url(4, 1, 2, 'beta', 3, 17) == expected + assert _make_url(4, 1, 2, "beta", 3, 17) == expected expected = "https://coverage.readthedocs.io/en/4.1.2.dev17" - assert _make_url(4, 1, 2, 'final', 0, 17) == expected + assert _make_url(4, 1, 2, "final", 0, 17) == expected diff --git a/tests/test_xml.py b/tests/test_xml.py index c374ee0a2..ecd74eae5 100644 --- a/tests/test_xml.py +++ b/tests/test_xml.py @@ -56,7 +56,7 @@ def here(p: str) -> str: for i in range(width): next_dir = here(f"d{i}") - self.make_tree(width, depth-1, next_dir) + self.make_tree(width, depth - 1, next_dir) if curdir != ".": self.make_file(here("__init__.py"), "") for i in range(width): @@ -80,7 +80,8 @@ class XmlTestHelpersTest(XmlTestHelpers, CoverageTest): run_in_temp_dir = False def test_assert_source(self) -> None: - dom = ElementTree.fromstring("""\ + dom = ElementTree.fromstring( + """\ foo @@ -88,7 +89,8 @@ def test_assert_source(self) -> None: {cwd}another - """.format(cwd=abs_file(".")+os.sep)) + """.format(cwd=abs_file(".") + os.sep) + ) self.assert_source(dom, "something") self.assert_source(dom, "another") @@ -172,7 +174,7 @@ def test_filename_format_showing_everything(self) -> None: dom = ElementTree.parse("coverage.xml") elts = dom.findall(".//class[@name='doit.py']") assert len(elts) == 1 - assert elts[0].get('filename') == "sub/doit.py" + assert elts[0].get("filename") == "sub/doit.py" def test_filename_format_including_filename(self) -> None: cov = self.run_doit() @@ -180,16 +182,17 @@ def test_filename_format_including_filename(self) -> None: dom = ElementTree.parse("coverage.xml") elts = dom.findall(".//class[@name='doit.py']") assert len(elts) == 1 - assert elts[0].get('filename') == "sub/doit.py" + assert elts[0].get("filename") == "sub/doit.py" def test_filename_format_including_module(self) -> None: cov = self.run_doit() - import sub.doit # pylint: disable=import-error + import sub.doit # pylint: disable=import-error + cov.xml_report([sub.doit]) dom = ElementTree.parse("coverage.xml") elts = 
dom.findall(".//class[@name='doit.py']") assert len(elts) == 1 - assert elts[0].get('filename') == "sub/doit.py" + assert elts[0].get("filename") == "sub/doit.py" def test_reporting_on_nothing(self) -> None: # Used to raise a zero division error: @@ -201,8 +204,8 @@ def test_reporting_on_nothing(self) -> None: dom = ElementTree.parse("coverage.xml") elts = dom.findall(".//class[@name='empty.py']") assert len(elts) == 1 - assert elts[0].get('filename') == "empty.py" - assert elts[0].get('line-rate') == '1' + assert elts[0].get("filename") == "empty.py" + assert elts[0].get("line-rate") == "1" def test_empty_file_is_100_not_0(self) -> None: # https://github.com/nedbat/coveragepy/issues/345 @@ -211,7 +214,7 @@ def test_empty_file_is_100_not_0(self) -> None: dom = ElementTree.parse("coverage.xml") elts = dom.findall(".//class[@name='__init__.py']") assert len(elts) == 1 - assert elts[0].get('line-rate') == '1' + assert elts[0].get("line-rate") == "1" def test_empty_file_is_skipped(self) -> None: cov = self.run_doit() @@ -250,7 +253,7 @@ def test_deep_source(self) -> None: ) dom = ElementTree.parse("coverage.xml") - self.assert_source(dom, "src/main") # type: ignore[arg-type] + self.assert_source(dom, "src/main") # type: ignore[arg-type] self.assert_source(dom, "also/over/there") # type: ignore[arg-type] sources = dom.findall(".//source") assert len(sources) == 2 @@ -258,21 +261,21 @@ def test_deep_source(self) -> None: foo_class = dom.findall(".//class[@name='foo.py']") assert len(foo_class) == 1 assert foo_class[0].attrib == { - 'branch-rate': '0', - 'complexity': '0', - 'filename': 'foo.py', - 'line-rate': '1', - 'name': 'foo.py', + "branch-rate": "0", + "complexity": "0", + "filename": "foo.py", + "line-rate": "1", + "name": "foo.py", } bar_class = dom.findall(".//class[@name='bar.py']") assert len(bar_class) == 1 assert bar_class[0].attrib == { - 'branch-rate': '0', - 'complexity': '0', - 'filename': 'bar.py', - 'line-rate': '1', - 'name': 'bar.py', + "branch-rate": "0", + "complexity": "0", + "filename": "bar.py", + "line-rate": "1", + "name": "bar.py", } def test_nonascii_directory(self) -> None: @@ -386,7 +389,7 @@ def package_and_class_tags(self, cov: Coverage) -> Iterator[tuple[str, dict[str, dom = ElementTree.parse("coverage.xml") for node in dom.iter(): if node.tag in ["package", "class"]: - yield (node.tag, {a:v for a,v in node.items() if a in ["name", "filename"]}) + yield (node.tag, {a: v for a, v in node.items() if a in ["name", "filename"]}) def assert_package_and_class_tags(self, cov: Coverage, result: Any) -> None: """Check the XML package and class tags from `cov` match `result`.""" @@ -394,87 +397,111 @@ def assert_package_and_class_tags(self, cov: Coverage, result: Any) -> None: def test_package_names(self) -> None: self.make_tree(width=1, depth=3) - self.make_file("main.py", """\ + self.make_file( + "main.py", + """\ from d0.d0 import f0 - """) + """, + ) cov = coverage.Coverage(source=["."]) self.start_import_stop(cov, "main") - self.assert_package_and_class_tags(cov, [ - ('package', {'name': "."}), - ('class', {'filename': "main.py", 'name': "main.py"}), - ('package', {'name': "d0"}), - ('class', {'filename': "d0/__init__.py", 'name': "__init__.py"}), - ('class', {'filename': "d0/f0.py", 'name': "f0.py"}), - ('package', {'name': "d0.d0"}), - ('class', {'filename': "d0/d0/__init__.py", 'name': "__init__.py"}), - ('class', {'filename': "d0/d0/f0.py", 'name': "f0.py"}), - ]) + self.assert_package_and_class_tags( + cov, + [ + ("package", {"name": "."}), + ("class", 
{"filename": "main.py", "name": "main.py"}), + ("package", {"name": "d0"}), + ("class", {"filename": "d0/__init__.py", "name": "__init__.py"}), + ("class", {"filename": "d0/f0.py", "name": "f0.py"}), + ("package", {"name": "d0.d0"}), + ("class", {"filename": "d0/d0/__init__.py", "name": "__init__.py"}), + ("class", {"filename": "d0/d0/f0.py", "name": "f0.py"}), + ], + ) def test_package_depth_1(self) -> None: self.make_tree(width=1, depth=4) - self.make_file("main.py", """\ + self.make_file( + "main.py", + """\ from d0.d0 import f0 - """) + """, + ) cov = coverage.Coverage(source=["."]) self.start_import_stop(cov, "main") cov.set_option("xml:package_depth", 1) - self.assert_package_and_class_tags(cov, [ - ('package', {'name': "."}), - ('class', {'filename': "main.py", 'name': "main.py"}), - ('package', {'name': "d0"}), - ('class', {'filename': "d0/__init__.py", 'name': "__init__.py"}), - ('class', {'filename': "d0/d0/__init__.py", 'name': "d0/__init__.py"}), - ('class', {'filename': "d0/d0/d0/__init__.py", 'name': "d0/d0/__init__.py"}), - ('class', {'filename': "d0/d0/d0/f0.py", 'name': "d0/d0/f0.py"}), - ('class', {'filename': "d0/d0/f0.py", 'name': "d0/f0.py"}), - ('class', {'filename': "d0/f0.py", 'name': "f0.py"}), - ]) + self.assert_package_and_class_tags( + cov, + [ + ("package", {"name": "."}), + ("class", {"filename": "main.py", "name": "main.py"}), + ("package", {"name": "d0"}), + ("class", {"filename": "d0/__init__.py", "name": "__init__.py"}), + ("class", {"filename": "d0/d0/__init__.py", "name": "d0/__init__.py"}), + ("class", {"filename": "d0/d0/d0/__init__.py", "name": "d0/d0/__init__.py"}), + ("class", {"filename": "d0/d0/d0/f0.py", "name": "d0/d0/f0.py"}), + ("class", {"filename": "d0/d0/f0.py", "name": "d0/f0.py"}), + ("class", {"filename": "d0/f0.py", "name": "f0.py"}), + ], + ) def test_package_depth_2(self) -> None: self.make_tree(width=1, depth=4) - self.make_file("main.py", """\ + self.make_file( + "main.py", + """\ from d0.d0 import f0 - """) + """, + ) cov = coverage.Coverage(source=["."]) self.start_import_stop(cov, "main") cov.set_option("xml:package_depth", 2) - self.assert_package_and_class_tags(cov, [ - ('package', {'name': "."}), - ('class', {'filename': "main.py", 'name': "main.py"}), - ('package', {'name': "d0"}), - ('class', {'filename': "d0/__init__.py", 'name': "__init__.py"}), - ('class', {'filename': "d0/f0.py", 'name': "f0.py"}), - ('package', {'name': "d0.d0"}), - ('class', {'filename': "d0/d0/__init__.py", 'name': "__init__.py"}), - ('class', {'filename': "d0/d0/d0/__init__.py", 'name': "d0/__init__.py"}), - ('class', {'filename': "d0/d0/d0/f0.py", 'name': "d0/f0.py"}), - ('class', {'filename': "d0/d0/f0.py", 'name': "f0.py"}), - ]) + self.assert_package_and_class_tags( + cov, + [ + ("package", {"name": "."}), + ("class", {"filename": "main.py", "name": "main.py"}), + ("package", {"name": "d0"}), + ("class", {"filename": "d0/__init__.py", "name": "__init__.py"}), + ("class", {"filename": "d0/f0.py", "name": "f0.py"}), + ("package", {"name": "d0.d0"}), + ("class", {"filename": "d0/d0/__init__.py", "name": "__init__.py"}), + ("class", {"filename": "d0/d0/d0/__init__.py", "name": "d0/__init__.py"}), + ("class", {"filename": "d0/d0/d0/f0.py", "name": "d0/f0.py"}), + ("class", {"filename": "d0/d0/f0.py", "name": "f0.py"}), + ], + ) def test_package_depth_3(self) -> None: self.make_tree(width=1, depth=4) - self.make_file("main.py", """\ + self.make_file( + "main.py", + """\ from d0.d0 import f0 - """) + """, + ) cov = coverage.Coverage(source=["."]) 
self.start_import_stop(cov, "main") cov.set_option("xml:package_depth", 3) - self.assert_package_and_class_tags(cov, [ - ('package', {'name': "."}), - ('class', {'filename': "main.py", 'name': "main.py"}), - ('package', {'name': "d0"}), - ('class', {'filename': "d0/__init__.py", 'name': "__init__.py"}), - ('class', {'filename': "d0/f0.py", 'name': "f0.py"}), - ('package', {'name': "d0.d0"}), - ('class', {'filename': "d0/d0/__init__.py", 'name': "__init__.py"}), - ('class', {'filename': "d0/d0/f0.py", 'name': "f0.py"}), - ('package', {'name': "d0.d0.d0"}), - ('class', {'filename': "d0/d0/d0/__init__.py", 'name': "__init__.py"}), - ('class', {'filename': "d0/d0/d0/f0.py", 'name': "f0.py"}), - ]) + self.assert_package_and_class_tags( + cov, + [ + ("package", {"name": "."}), + ("class", {"filename": "main.py", "name": "main.py"}), + ("package", {"name": "d0"}), + ("class", {"filename": "d0/__init__.py", "name": "__init__.py"}), + ("class", {"filename": "d0/f0.py", "name": "f0.py"}), + ("package", {"name": "d0.d0"}), + ("class", {"filename": "d0/d0/__init__.py", "name": "__init__.py"}), + ("class", {"filename": "d0/d0/f0.py", "name": "f0.py"}), + ("package", {"name": "d0.d0.d0"}), + ("class", {"filename": "d0/d0/d0/__init__.py", "name": "__init__.py"}), + ("class", {"filename": "d0/d0/d0/f0.py", "name": "f0.py"}), + ], + ) def test_source_prefix(self) -> None: # https://github.com/nedbat/coveragepy/issues/465 @@ -483,12 +510,15 @@ def test_source_prefix(self) -> None: cov = coverage.Coverage(source=["src"]) self.start_import_stop(cov, "mod", modfile="src/mod.py") - self.assert_package_and_class_tags(cov, [ - ('package', {'name': "."}), - ('class', {'filename': "mod.py", 'name': "mod.py"}), - ]) + self.assert_package_and_class_tags( + cov, + [ + ("package", {"name": "."}), + ("class", {"filename": "mod.py", "name": "mod.py"}), + ], + ) dom = ElementTree.parse("coverage.xml") - self.assert_source(dom, "src") # type: ignore[arg-type] + self.assert_source(dom, "src") # type: ignore[arg-type] @pytest.mark.parametrize("trail", ["", "/", "\\"]) def test_relative_source(self, trail: str) -> None: @@ -509,11 +539,11 @@ def compare_xml(expected: str, actual: str, actual_extra: bool = False) -> None: """Specialized compare function for our XML files.""" source_path = coverage.files.relative_directory().rstrip(r"\/") - scrubs=[ + scrubs = [ (r' timestamp="\d+"', ' timestamp="TIMESTAMP"'), (r' version="[-.\w]+"', ' version="VERSION"'), - (r'\s*.*?\s*', '%s' % re.escape(source_path)), - (r'/coverage\.readthedocs\.io/?[-.\w/]*', '/coverage.readthedocs.io/VER'), + (r"\s*.*?\s*", "%s" % re.escape(source_path)), + (r"/coverage\.readthedocs\.io/?[-.\w/]*", "/coverage.readthedocs.io/VER"), ] compare(expected, actual, scrubs=scrubs, actual_extra=actual_extra) @@ -522,13 +552,16 @@ class XmlGoldTest(CoverageTest): """Tests of XML reporting that use gold files.""" def test_a_xml_1(self) -> None: - self.make_file("a.py", """\ + self.make_file( + "a.py", + """\ if 1 < 2: # Needed a < to look at HTML entities. a = 3 else: a = 4 - """) + """, + ) cov = coverage.Coverage() a = self.start_import_stop(cov, "a") @@ -536,19 +569,25 @@ def test_a_xml_1(self) -> None: compare_xml(gold_path("xml/x_xml"), ".", actual_extra=True) def test_a_xml_2(self) -> None: - self.make_file("a.py", """\ + self.make_file( + "a.py", + """\ if 1 < 2: # Needed a < to look at HTML entities. 
a = 3 else: a = 4 - """) + """, + ) - self.make_file("run_a_xml_2.ini", """\ + self.make_file( + "run_a_xml_2.ini", + """\ # Put all the XML output in xml_2 [xml] output = xml_2/coverage.xml - """) + """, + ) cov = coverage.Coverage(config_file="run_a_xml_2.ini") a = self.start_import_stop(cov, "a") @@ -556,7 +595,9 @@ def test_a_xml_2(self) -> None: compare_xml(gold_path("xml/x_xml"), "xml_2") def test_y_xml_branch(self) -> None: - self.make_file("y.py", """\ + self.make_file( + "y.py", + """\ def choice(x): if x < 2: return 3 @@ -564,7 +605,8 @@ def choice(x): return 4 assert choice(1) == 3 - """) + """, + ) cov = coverage.Coverage(branch=True) y = self.start_import_stop(cov, "y") From bfeb2aec1de7c298060b0c972f86d0f1f3e4da71 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Thu, 21 Aug 2025 08:20:58 -0400 Subject: [PATCH 20/43] style: fix things so pylint is happy with ruff --- .git-blame-ignore-revs | 3 +++ coverage/phystokens.py | 4 +-- coverage/pytracer.py | 4 +-- tests/test_debug.py | 2 +- tests/test_parser.py | 59 ++++++++++++++++++++++-------------------- 5 files changed, 39 insertions(+), 33 deletions(-) diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index b5f9b4c12..8d039e619 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -30,3 +30,6 @@ e4e238a9ed8f2ad2b9060247591b4c057c2953bf # 2025-08-09 style: ruff check --fix --fixable=I001,Q000 coverage/*.py a32cd74bc0aaf0714acaf37beb21c84cbe2ee3ff + +# 2025-08-21 chore: `ruff format .` +82467f72306efdb207af09ace27b6b3ed4c7ad6f diff --git a/coverage/phystokens.py b/coverage/phystokens.py index 17d128da6..58f1d3e88 100644 --- a/coverage/phystokens.py +++ b/coverage/phystokens.py @@ -55,8 +55,8 @@ def _phys_tokens(toks: TokenInfos) -> TokenInfos: if last_ttext.endswith("\\"): inject_backslash = False elif ttype == token.STRING: - if ( - last_line.endswith("\\\n") # pylint: disable=simplifiable-if-statement + if ( # pylint: disable=simplifiable-if-statement + last_line.endswith("\\\n") and last_line.rstrip(" \\\n").endswith(last_ttext) ): # Deal with special cases like such code:: diff --git a/coverage/pytracer.py b/coverage/pytracer.py index ea178add5..f32015d30 100644 --- a/coverage/pytracer.py +++ b/coverage/pytracer.py @@ -128,8 +128,8 @@ def log(self, marker: str, *args: Any) -> None: with open("/tmp/debug_trace.txt", "a", encoding="utf-8") as f: f.write(f"{marker} {self.id}[{len(self.data_stack)}]") if 0: # if you want thread ids.. 
- f.write( - ".{:x}.{:x}".format( # type: ignore[unreachable] + f.write( # type: ignore[unreachable] + ".{:x}.{:x}".format( self.thread.ident, self.threading.current_thread().ident, ) diff --git a/tests/test_debug.py b/tests/test_debug.py index 19f2a5828..74ac199ed 100644 --- a/tests/test_debug.py +++ b/tests/test_debug.py @@ -338,7 +338,7 @@ def test_debug_write_exceptions(self) -> None: lines = debug.get_output().splitlines() assert "Something happened" == lines[0] assert "Traceback (most recent call last):" == lines[1] - assert " raise RuntimeError('Oops') # This is in the traceback" in lines + assert ' raise RuntimeError("Oops") # This is in the traceback' in lines assert "RuntimeError: Oops" == lines[-1] def test_debug_write_self(self) -> None: diff --git a/tests/test_parser.py b/tests/test_parser.py index 07ff0287c..6b278ce4a 100644 --- a/tests/test_parser.py +++ b/tests/test_parser.py @@ -547,87 +547,90 @@ def very_long_function_to_exclude_name( exclude="function_to_exclude", ) assert parser.statements == {1, 7} + # linters and formatters can't agree about whether strings are too + # long, so format in the long variable names that make them too long. + long_arg = "super_long_input_argument" parser = self.parse_text( - """\ + f"""\ def my_func( - super_long_input_argument_0=0, - super_long_input_argument_1=1, - super_long_input_argument_2=2): + {long_arg}_0=0, + {long_arg}_1=1, + {long_arg}_2=2): pass - def my_func_2(super_long_input_argument_0=0, super_long_input_argument_1=1, super_long_input_argument_2=2): + def my_func_2({long_arg}_0=0, {long_arg}_1=1, {long_arg}_2=2): pass """, exclude="my_func", ) assert parser.statements == set() parser = self.parse_text( - """\ + f"""\ def my_func( - super_long_input_argument_0=0, - super_long_input_argument_1=1, - super_long_input_argument_2=2): + {long_arg}_0=0, + {long_arg}_1=1, + {long_arg}_2=2): pass - def my_func_2(super_long_input_argument_0=0, super_long_input_argument_1=1, super_long_input_argument_2=2): + def my_func_2({long_arg}_0=0, {long_arg}_1=1, {long_arg}_2=2): pass """, exclude="my_func_2", ) assert parser.statements == {1, 5} parser = self.parse_text( - """\ + f"""\ def my_func ( - super_long_input_argument_0=0, - super_long_input_argument_1=1, - super_long_input_argument_2=2): + {long_arg}_0=0, + {long_arg}_1=1, + {long_arg}_2=2): pass - def my_func_2 (super_long_input_argument_0=0, super_long_input_argument_1=1, super_long_input_argument_2=2): + def my_func_2 ({long_arg}_0=0, {long_arg}_1=1, {long_arg}_2=2): pass """, exclude="my_func_2", ) assert parser.statements == {1, 5} parser = self.parse_text( - """\ + f"""\ def my_func ( - super_long_input_argument_0=0, - super_long_input_argument_1=1, - super_long_input_argument_2=2): + {long_arg}_0=0, + {long_arg}_1=1, + {long_arg}_2=2): pass - def my_func_2 (super_long_input_argument_0=0, super_long_input_argument_1=1, super_long_input_argument_2=2): + def my_func_2 ({long_arg}_0=0, {long_arg}_1=1, {long_arg}_2=2): pass """, exclude="my_func", ) assert parser.statements == set() parser = self.parse_text( - """\ + f"""\ def my_func \ ( - super_long_input_argument_0=0, - super_long_input_argument_1=1 + {long_arg}_0=0, + {long_arg}_1=1 ): pass - def my_func_2(super_long_input_argument_0=0, super_long_input_argument_1=1, super_long_input_argument_2=2): + def my_func_2({long_arg}_0=0, {long_arg}_1=1, {long_arg}_2=2): pass """, exclude="my_func_2", ) assert parser.statements == {1, 5} parser = self.parse_text( - """\ + f"""\ def my_func \ ( - super_long_input_argument_0=0, - 
super_long_input_argument_1=1 + {long_arg}_0=0, + {long_arg}_1=1 ): pass - def my_func_2(super_long_input_argument_0=0, super_long_input_argument_1=1, super_long_input_argument_2=2): + def my_func_2({long_arg}_0=0, {long_arg}_1=1, {long_arg}_2=2): pass """, exclude="my_func", From 9ee5b3e9e053168caa4e7bd32389b5592ad9b503 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Thu, 21 Aug 2025 08:22:47 -0400 Subject: [PATCH 21/43] chore: make upgrade --- doc/requirements.pip | 4 ++-- requirements/dev.pip | 6 +++--- requirements/kit.pip | 8 +++++--- requirements/mypy.pip | 2 +- requirements/pytest.pip | 2 +- 5 files changed, 12 insertions(+), 10 deletions(-) diff --git a/doc/requirements.pip b/doc/requirements.pip index c4b7d1357..e2d155d00 100644 --- a/doc/requirements.pip +++ b/doc/requirements.pip @@ -53,7 +53,7 @@ mdurl==0.1.2 # via markdown-it-py packaging==25.0 # via sphinx -pbr==7.0.0 +pbr==7.0.1 # via stevedore polib==1.2.0 # via sphinx-lint @@ -67,7 +67,7 @@ pygments==2.19.2 # sphinx regex==2025.7.34 # via sphinx-lint -requests==2.32.4 +requests==2.32.5 # via # scriv # sphinx diff --git a/requirements/dev.pip b/requirements/dev.pip index ac25132b7..a26cdd7be 100644 --- a/requirements/dev.pip +++ b/requirements/dev.pip @@ -53,7 +53,7 @@ flaky==3.8.1 # via -r requirements/pytest.in greenlet==3.2.4 # via -r requirements/dev.in -hypothesis==6.138.1 +hypothesis==6.138.2 # via -r requirements/pytest.in id==1.5.0 # via twine @@ -72,7 +72,7 @@ jaraco-classes==3.4.0 # via keyring jaraco-context==6.0.1 # via keyring -jaraco-functools==4.2.1 +jaraco-functools==4.3.0 # via keyring jedi==0.19.2 # via pudb @@ -144,7 +144,7 @@ readme-renderer==44.0 # via # -r requirements/dev.in # twine -requests==2.32.4 +requests==2.32.5 # via # -r requirements/dev.in # id diff --git a/requirements/kit.pip b/requirements/kit.pip index dca33fa42..d94e241a9 100644 --- a/requirements/kit.pip +++ b/requirements/kit.pip @@ -18,7 +18,7 @@ certifi==2025.8.3 # requests charset-normalizer==3.4.3 # via requests -cibuildwheel==3.1.3 +cibuildwheel==3.1.4 # via -r requirements/kit.in colorama==0.4.6 # via -r requirements/kit.in @@ -40,7 +40,7 @@ jaraco-classes==3.4.0 # via keyring jaraco-context==6.0.1 # via keyring -jaraco-functools==4.2.1 +jaraco-functools==4.3.0 # via keyring keyring==25.6.0 # via twine @@ -61,6 +61,8 @@ packaging==25.0 # cibuildwheel # dependency-groups # twine +patchelf==0.17.2.4 + # via cibuildwheel platformdirs==4.3.8 # via cibuildwheel pyelftools==0.32 @@ -75,7 +77,7 @@ pyproject-hooks==1.2.0 # via build readme-renderer==44.0 # via twine -requests==2.32.4 +requests==2.32.5 # via # id # requests-toolbelt diff --git a/requirements/mypy.pip b/requirements/mypy.pip index dec09ca80..43276298f 100644 --- a/requirements/mypy.pip +++ b/requirements/mypy.pip @@ -12,7 +12,7 @@ execnet==2.1.1 # via pytest-xdist flaky==3.8.1 # via -r requirements/pytest.in -hypothesis==6.138.1 +hypothesis==6.138.2 # via -r requirements/pytest.in iniconfig==2.1.0 # via pytest diff --git a/requirements/pytest.pip b/requirements/pytest.pip index 7936e493f..bf6317327 100644 --- a/requirements/pytest.pip +++ b/requirements/pytest.pip @@ -12,7 +12,7 @@ execnet==2.1.1 # via pytest-xdist flaky==3.8.1 # via -r requirements/pytest.in -hypothesis==6.138.1 +hypothesis==6.138.2 # via -r requirements/pytest.in iniconfig==2.1.0 # via pytest From 1f9f84079aef5aab496381502aa818ea0574982a Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Thu, 21 Aug 2025 08:37:47 -0400 Subject: [PATCH 22/43] build: tooling for ruff formatting --- 
.pre-commit-config.yaml | 5 +++++ pyproject.toml | 3 +-- requirements/dev.in | 1 + requirements/dev.pip | 2 ++ tox.ini | 1 + 5 files changed, 10 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 6e58c04e3..8214d28f3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -15,6 +15,11 @@ repos: - id: trailing-whitespace exclude: "stress_phystoken|\\.py,cover$" + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.12.9 + hooks: + - id: ruff-format + - repo: https://github.com/pre-commit/pygrep-hooks rev: v1.10.0 hooks: diff --git a/pyproject.toml b/pyproject.toml index 4471cdf24..32f8cb117 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -156,13 +156,12 @@ ghrel_template = """ """ ## RUFF -# We aren't using ruff for real yet... +# We are using ruff for formatting, but not for linting. [tool.ruff] # PYVERSION target-version = "py39" # Can't use [project] line-length = 100 -src = ["coverage", "tests"] [tool.ruff.lint] select = ["ALL"] diff --git a/requirements/dev.in b/requirements/dev.in index 366206ed9..d088d2a35 100644 --- a/requirements/dev.in +++ b/requirements/dev.in @@ -17,6 +17,7 @@ cogapp greenlet pylint readme_renderer +ruff # for kitting. libsass diff --git a/requirements/dev.pip b/requirements/dev.pip index a26cdd7be..d9a788779 100644 --- a/requirements/dev.pip +++ b/requirements/dev.pip @@ -157,6 +157,8 @@ rfc3986==2.0.0 # via twine rich==14.1.0 # via twine +ruff==0.12.9 + # via -r requirements/dev.in scriv==1.7.0 # via -r requirements/dev.in setuptools==80.9.0 diff --git a/tox.ini b/tox.ini index a908e5e81..3224d624a 100644 --- a/tox.ini +++ b/tox.ini @@ -104,6 +104,7 @@ commands = # If this command fails, see the comment at the top of doc/cmd.rst python -m cogapp -cP --check --verbosity=1 doc/*.rst python -m cogapp -cP --check --verbosity=1 .github/workflows/*.yml + ruff format --check python -m pylint -j 0 --notes= --ignore-paths 'doc/_build/.*' {env:LINTABLE} check-manifest --ignore 'doc/sample_html/*' # If 'build -q' becomes a thing (https://github.com/pypa/build/issues/188), From a5c18cc5eadc9df04005420207d53d74a94b9adf Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Sat, 23 Aug 2025 08:04:40 -0400 Subject: [PATCH 23/43] style: auto-generated changes shouldn't trigger ruff re-formatting --- igor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/igor.py b/igor.py index 54ac8d1bf..a2cc82e1c 100644 --- a/igor.py +++ b/igor.py @@ -406,7 +406,7 @@ def do_edit_for_release(): new_conf = textwrap.dedent( f"""\ # @@@ editable - copyright = "2009\N{EN DASH}{facts.now:%Y}, Ned Batchelder" # pylint: disable=redefined-builtin + copyright = "2009\N{EN DASH}{facts.now:%Y}, Ned Batchelder" # pylint: disable=redefined-builtin # The short X.Y.Z version. version = "{facts.shortver}" # The full version, including alpha/beta/rc tags. From b5bc6d409aefc11d0277fbf8af9c930bea686ed5 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Sat, 23 Aug 2025 08:07:14 -0400 Subject: [PATCH 24/43] docs: prep for 7.10.5 --- CHANGES.rst | 16 +++++++++------- coverage/version.py | 4 ++-- doc/conf.py | 6 +++--- 3 files changed, 14 insertions(+), 12 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 43cd70779..f264538ee 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -20,20 +20,22 @@ upgrading your version of coverage.py. .. Version 9.8.1 — 2027-07-27 .. -------------------------- -Unreleased ----------- +.. start-releases + +.. 
_changes_7-10-5: + +Version 7.10.5 — 2025-08-23 +--------------------------- -- Big speed improvements for ``coverage combine``: it now takes about half the - time it used to! Huge thanks to Alex Gaynor for pull requests `2032 - `_, `2033 `_, and `2034 `_. +- Big speed improvements for ``coverage combine``: it's now about twice as + fast! Huge thanks to Alex Gaynor for pull requests `2032 `_, + `2033 `_, and `2034 `_. .. _pull 2032: https://github.com/nedbat/coveragepy/pull/2032 .. _pull 2033: https://github.com/nedbat/coveragepy/pull/2033 .. _pull 2034: https://github.com/nedbat/coveragepy/pull/2034 -.. start-releases - .. _changes_7-10-4: Version 7.10.4 — 2025-08-16 diff --git a/coverage/version.py b/coverage/version.py index b7ed4c3d4..a7cd7a5ff 100644 --- a/coverage/version.py +++ b/coverage/version.py @@ -8,8 +8,8 @@ # version_info: same semantics as sys.version_info. # _dev: the .devN suffix if any. -version_info = (7, 10, 5, "alpha", 0) -_dev = 1 +version_info = (7, 10, 5, "final", 0) +_dev = 0 def _make_version( diff --git a/doc/conf.py b/doc/conf.py index a84ef6288..75a012e73 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -67,11 +67,11 @@ # @@@ editable copyright = "2009–2025, Ned Batchelder" # pylint: disable=redefined-builtin # The short X.Y.Z version. -version = "7.10.4" +version = "7.10.5" # The full version, including alpha/beta/rc tags. -release = "7.10.4" +release = "7.10.5" # The date of release, in "monthname day, year" format. -release_date = "August 16, 2025" +release_date = "August 23, 2025" # @@@ end rst_epilog = f""" From 107ae05233005f8a5d497d8492c6410428dbbc28 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Sat, 23 Aug 2025 08:07:56 -0400 Subject: [PATCH 25/43] docs: sample HTML for 7.10.5 --- doc/sample_html/class_index.html | 8 ++++---- doc/sample_html/function_index.html | 8 ++++---- doc/sample_html/index.html | 8 ++++---- doc/sample_html/status.json | 2 +- doc/sample_html/z_7b071bdc2a35fa80___init___py.html | 8 ++++---- doc/sample_html/z_7b071bdc2a35fa80___main___py.html | 8 ++++---- doc/sample_html/z_7b071bdc2a35fa80_cogapp_py.html | 8 ++++---- doc/sample_html/z_7b071bdc2a35fa80_hashhandler_py.html | 8 ++++---- doc/sample_html/z_7b071bdc2a35fa80_makefiles_py.html | 8 ++++---- doc/sample_html/z_7b071bdc2a35fa80_test_cogapp_py.html | 8 ++++---- doc/sample_html/z_7b071bdc2a35fa80_test_makefiles_py.html | 8 ++++---- .../z_7b071bdc2a35fa80_test_whiteutils_py.html | 8 ++++---- doc/sample_html/z_7b071bdc2a35fa80_utils_py.html | 8 ++++---- doc/sample_html/z_7b071bdc2a35fa80_whiteutils_py.html | 8 ++++---- 14 files changed, 53 insertions(+), 53 deletions(-) diff --git a/doc/sample_html/class_index.html b/doc/sample_html/class_index.html index 434a50a7d..e03b7540e 100644 --- a/doc/sample_html/class_index.html +++ b/doc/sample_html/class_index.html @@ -56,8 +56,8 @@
[doc/sample_html hunks trimmed: their HTML markup was lost in extraction. In each of the
listed sample report pages the change is only the generated version/timestamp text, from
"coverage.py v7.10.4, created at 2025-08-16 19:42 -0400" to
"coverage.py v7.10.5, created at 2025-08-23 08:07 -0400".]
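For readers following the release mechanics in the patches above: coverage/version.py
flips version_info to (7, 10, 5, "final", 0) with _dev = 0, and doc/conf.py and the
sample HTML footers pick up the plain "7.10.5" string. The tests/test_version.py hunk
earlier in this series pins down how those tuples are expected to render. The snippet
below is a minimal sketch of that rendering under the behavior asserted there, not the
real helpers in coverage/version.py; make_version_string and make_doc_url are
hypothetical names introduced here for illustration.

    # Hypothetical sketch; mirrors the asserts in tests/test_version.py, not the real code.
    def make_version_string(major, minor, micro, releaselevel="final", serial=0, dev=0):
        """Render a version tuple, e.g. (4, 1, 2, "beta", 3) -> "4.1.2b3"."""
        assert releaselevel in {"alpha", "beta", "candidate", "final"}
        version = f"{major}.{minor}.{micro}"
        if releaselevel != "final":
            # alpha -> aN, beta -> bN, candidate -> rcN; "final" adds nothing.
            version += {"alpha": "a", "beta": "b", "candidate": "rc"}[releaselevel] + str(serial)
        if dev:
            version += f".dev{dev}"
        return version

    def make_doc_url(major, minor, micro, releaselevel="final", serial=0, dev=0):
        """The docs URL is the version string appended to the readthedocs base."""
        return "https://coverage.readthedocs.io/en/" + make_version_string(
            major, minor, micro, releaselevel, serial, dev,
        )

    # The same cases the tests assert:
    assert make_version_string(5, 10, 2, "candidate", 7, 3) == "5.10.2rc7.dev3"
    assert make_doc_url(4, 1, 2, "beta", 3) == "https://coverage.readthedocs.io/en/4.1.2b3"
    # With this sketch, the released state (7, 10, 5, "final", 0) with _dev = 0 gives "7.10.5",
    # while the pre-release bump (7, 10, 5, "alpha", 0) with _dev = 1 gives "7.10.5a0.dev1".
    assert make_version_string(7, 10, 5, "final", 0, 0) == "7.10.5"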