diff --git a/.github/workflows/deploy_pages.yaml b/.github/workflows/deploy_pages.yaml new file mode 100644 index 0000000..f3f5529 --- /dev/null +++ b/.github/workflows/deploy_pages.yaml @@ -0,0 +1,37 @@ +name: Publish Pages + +on: + push: + branches: [main] # branch to trigger deployment + +concurrency: + group: gh-${{ github.ref }} + cancel-in-progress: true + +jobs: + build: + + runs-on: ubuntu-latest + strategy: + matrix: + python-version: [3.12] + + steps: + - uses: actions/checkout@v5 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v6 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements.txt + - name: Build static site + run: | + make html + + - name: Deploy + uses: peaceiris/actions-gh-pages@v4 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + publish_dir: ./build/html diff --git a/source/conf.py b/source/conf.py index 6eccd43..00919e3 100644 --- a/source/conf.py +++ b/source/conf.py @@ -20,7 +20,7 @@ project = 'Programming in Python' # the one with the text too -html_logo = "_static/UWPCE_logo_full.png" +# html_logo = "_static/UWPCE_logo_full.png" source_suffix = '.rst' # The master toctree document. @@ -44,12 +44,12 @@ author = ", ".join(sorted(author_list, key=lambda n: n.split()[-1])) -copyright = ("2020, University of Washington, {}. " +copyright = ("2025, {}. " "Creative Commons Attribution-ShareAlike 4.0 license".format(author) ).format(author) # The full version, including alpha/beta/rc tags -release = '7.0' +release = '8.0' # -- General configuration --------------------------------------------------- @@ -84,7 +84,6 @@ # 'analytics_id': 'UA-XXXXXXX-1', # Provided by Google in your dashboard # 'github_url': 'https://github.com/UWPCE-PythonCert/ProgrammingInPython', 'logo_only': True, - 'display_version': False, 'prev_next_buttons_location': 'both', 'style_external_links': True, # 'vcs_pageview_mode': '', diff --git a/source/exercises/unit_testing/sum_13.py b/source/exercises/unit_testing/sum_13.py new file mode 100644 index 0000000..c23322b --- /dev/null +++ b/source/exercises/unit_testing/sum_13.py @@ -0,0 +1,114 @@ +""" + +Test driven development: + + +Example from Coding Bat: List-2 > sum13 + +https://codingbat.com/prob/p167025 + +Return the sum of the numbers in the array, returning 0 for an empty array. Except the number 13 is very unlucky, so it does not count and numbers that come immediately after a 13 also do not count. + + +sum13([1, 2, 2, 1]) → 6 +sum13([1, 1]) → 2 +sum13([1, 2, 2, 1, 13]) → 6 +sum13([1, 2, 2, 1]) → 6 +sum13([1, 1]) → 2 +sum13([1, 2, 2, 1, 13]) → 6 +sum13([1, 2, 13, 2, 1, 13]) → 4 +sum13([13, 1, 2, 13, 2, 1, 13]) → 3 +sum13([]) → 0 +sum13([13]) → 0 +sum13([13, 13]) → 0 +sum13([13, 0, 13]) → 0 +sum13([13, 1, 13]) → 0 +sum13([5, 7, 2]) → 14 +sum13([5, 13, 2]) → 5 +sum13([0]) → 0 +sum13([13, 0]) → 0 +""" + +import pytest + +def sum13(nums): + """ + non-functional -- but the tests will run (and fail) + """ + return None + +# def sum13(nums): +# """ +# simple sum -- no special handling of 13 -- should pass some tests. +# """ +# return sum(nums) + +# def sum13(nums): +# """ +# using a comprehension to filter out the 13s + +# - more tests should pass, but not all. +# """ +# return sum(n for n in nums if n!=13) + + +# def sum13(nums): +# """ +# darn -- comprehension can't handle the "after a 13" case + +# do it from scratch with while loop + +# fails the two 13s in a row test! 
+# """ +# total = 0 +# i = 0 +# while i < len(nums): +# if nums[i] != 13: +# total += nums[i] +# else: +# i += 1 +# i += 1 +# return total + + +# def sum13(nums): +# """ +# Use a for loop, and keep track of the previous 13 + +# passes all tests! +# """ +# print(nums) +# total = 0 +# prev_13 = False +# for i, n in enumerate(nums): +# if n == 13: +# prev_13 = True +# continue +# elif prev_13: +# prev_13 = False +# continue +# else: +# total += n +# return total + + +# def sum13(nums): +# """ +# Use the iterator protocol -- nifty? but not any simpler really. + +# Fails for repeated 13 in middle + +# Works with any iterable, so that's nice. +# """ +# total = 0 +# nums_i = iter(nums) +# for n in nums_i: +# if n != 13: +# total += n +# else: +# try: +# next(nums_i) +# # this is necessary for the case where there's a 13 at the end. +# except StopIteration: +# break +# return total diff --git a/source/exercises/unit_testing/test_sum_13.py b/source/exercises/unit_testing/test_sum_13.py new file mode 100644 index 0000000..fba6a77 --- /dev/null +++ b/source/exercises/unit_testing/test_sum_13.py @@ -0,0 +1,70 @@ +""" + +Test driven development: + +Example from Coding Bat: List-2 > sum13 + +https://codingbat.com/prob/p167025 + +Return the sum of the numbers in the array, returning 0 for an empty array. Except the number 13 is very unlucky, so it does not count and numbers that come immediately after a 13 also do not count. + + +The tests that are used on codingbat: + +sum13([1, 2, 2, 1]) → 6 +sum13([1, 1]) → 2 +sum13([1, 2, 2, 1, 13]) → 6 +sum13([1, 2, 2, 1]) → 6 +sum13([1, 1]) → 2 +sum13([1, 2, 2, 1, 13]) → 6 +sum13([1, 2, 13, 2, 1, 13]) → 4 +sum13([13, 1, 2, 13, 2, 1, 13]) → 3 +sum13([]) → 0 +sum13([13]) → 0 +sum13([13, 13]) → 0 +sum13([13, 0, 13]) → 0 +sum13([13, 1, 13]) → 0 +sum13([5, 7, 2]) → 14 +sum13([5, 13, 2]) → 5 +sum13([0]) → 0 +sum13([13, 0]) → 0 +""" + +import pytest + +from sum_13 import sum13 + + +# Using the nifty pytest.parametrize, so we only have to write one test + +test_data = [ + ([1, 2, 2, 1], 6), + ([1, 1], 2), + ([1, 2, 2, 1, 13], 6), + ([1, 2, 2, 1], 6), + ([1, 1], 2), + ([1, 2, 2, 1, 13], 6), + ([1, 2, 13, 2, 1, 13], 4), + ([13, 1, 2, 13, 2, 1, 13], 3), + ([], 0), + ([13], 0), + ([13, 13], 0), + ([13, 0, 13], 0), + ([13, 1, 13], 0), + ([5, 7, 2], 14), + ([5, 13, 2], 5), + ([0], 0), + ([13, 0], 0), + # These are not part of original test suite + # uncomment them, and see if your solution still passes. + # ([3, 13, 13, 2, 5], 8), + # (iter([13, 1, 2, 13, 2, 1, 13]), 3), # Does it work with an iterable? + ] + +@pytest.mark.parametrize('nums, result', test_data) +def test_sum13(nums, result): + assert sum13(nums) == result + + + + diff --git a/source/exercises/unit_testing/unit_testing.rst b/source/exercises/unit_testing/unit_testing.rst index e8658e9..53663de 100644 --- a/source/exercises/unit_testing/unit_testing.rst +++ b/source/exercises/unit_testing/unit_testing.rst @@ -92,6 +92,22 @@ Now edit the function in ``walnut_party.py``, and each time you make a change, r When the tests pass -- you are done! That's the beauty of test-driven development. 
+Another Example: +---------------- + +Here's another example from the codingbat site: + +:download:`sum_13.py ` + +and + +:download:`test_sum_13.py ` + + + + + + Doing your own: --------------- diff --git a/source/index.rst b/source/index.rst index cbd2abd..cd21246 100644 --- a/source/index.rst +++ b/source/index.rst @@ -7,11 +7,14 @@ Programming in Python ##################### -This site holds many of the materials for the +This site holds a set of materials for an introductory course in Python. + +The course was originally developed by a wide variety of instructors for the `University of Washington Professional and Continuing Education Python Certificate `_ `Introductory class `_ -This site can be thought of as the textbook for Programming in Python: the first course in the program. It contains notes about the topics covered in the classes, programming exercises, supplemental materials about setting up a development environment, and assorted references about Python-related topics. +This site can be thought of as the textbook for an introduction to programming in Python: +It contains notes about the topics covered in the classes, programming exercises, supplemental materials about setting up a development environment, and assorted references about Python-related topics. Many of these topics can be useful on their own, but each assumes that you know concepts that were introduced earlier in the program, so working through them in order can be helpful. @@ -35,7 +38,7 @@ They are built with the Sphinx documentation system, utilizing Restructured Text It is managed in this gitHub repository: -https://github.com/UWPCE-PythonCert/ProgrammingInPython +https://github.com/PythonCHB/ProgrammingInPython Readers are encouraged to report omissions, typos, or make suggestions for improvements via issues and pull requests on that repository. @@ -45,7 +48,7 @@ Example Code Assorted Example code can be found in the source repository for these documents. Most of the examples are linked to directly from these documents, but it might be helpful to have them all in one place: -https://github.com/UWPCE-PythonCert/ProgrammingInPython/tree/master/source/examples +https://github.com/PythonCHB/ProgrammingInPython/tree/main/source/examples .. Indices and tables diff --git a/source/modules/TestDrivenDevelopment.rst b/source/modules/TestDrivenDevelopment.rst index b8a76b4..d6fbe28 100644 --- a/source/modules/TestDrivenDevelopment.rst +++ b/source/modules/TestDrivenDevelopment.rst @@ -1,8 +1,6 @@ .. _test_driven_development: -FIXME: change the path from my personal to something generic - ####################### Test Driven Development ####################### @@ -16,7 +14,7 @@ Test Driven Development "Test Driven Development" (TDD) is a development strategy that integrates the development of unit tests with the code itself. In particular, you write the tests *before* you write the code, which seems pretty backward, but it has some real strengths. We'll demonstrate this technique with an example. -  + The following is adapted from Mark Pilgrim's excellent "Dive into Python": https://diveintopython3.problemsolving.io/ @@ -105,7 +103,7 @@ want it to. You read that right: you’re going to write code that tests code that you haven’t written yet. This is called *test-driven development*, or TDD. 
The set of two -conversion functions — ``to_roman()``, and later ``from_roman()`` — can +conversion functions — ``to_roman()``, and later ``from_roman()`` — can be written and tested as a unit, separate from any larger program that uses them. @@ -115,9 +113,7 @@ Technically, you can write unit tests with plain Python -- recall the ``assert`` $ python -m pip install pytest - Once installed, you should have the pytest command available in your terminal. - -FIXME: Maybe add a small page on installing and using pytest? +Once installed, you should have the pytest command available in your terminal. Unit testing is an important part of an overall testing-centric development strategy. If you write unit tests, it is important to write @@ -262,7 +258,7 @@ You don’t need to test every possible input, but you should try to test all th .. note:: This is a major challenge of unit testing -- how to catch all the edge cases, without over testing every little thing. -`pytest` makes it really simple to write a test case: simply define a function named ``test_anything``. pytest will identify any function with: "``test_``"" at the start of the name as a test function. +`pytest` makes it really simple to write a test case: simply define a function named ``test_anything``. pytest will identify any function with: "``test_``" at the start of the name as a test function. * Every individual test is its own function. A test function takes no parameters, returns no value, and must have a name beginning with the five letters ``test_``. If a test function exits normally without a failing assertion or other exception, the test is considered passed; if the function raises a failed assertion, failed. @@ -604,7 +600,7 @@ sort of failure; they must fail in the way you expect. In [13]: to_roman(9000) Out[13]: 'MMMMMMMMM' -That’s definitely *not* what you wanted — that’s not even a valid Roman +That’s definitely *not* what you wanted — that’s not even a valid Roman numeral! In fact, after 3000, each of these numbers is outside the range of acceptable input, but the function returns a bogus value anyway. @@ -684,7 +680,7 @@ code to pass it yet. Did it fail in the way you expected? Yes! ``pytest.raises`` did its job -- a ``ValueError`` was not raised, and the test failed. Of course, the ``to_roman()`` function isn’t raising the ``ValueError`` because you haven’t told it to do that yet. -That’s excellent news! It means this is a valid test case — it fails before you write the code to make it pass. +That’s excellent news! It means this is a valid test case — it fails before you write the code to make it pass. Now you can write the code to make this test pass. diff --git a/source/modules/Testing.rst b/source/modules/Testing.rst index a9ea6d9..6acfbe9 100644 --- a/source/modules/Testing.rst +++ b/source/modules/Testing.rst @@ -9,16 +9,47 @@ This page is a quick overview of testing in Python. It provides some background Testing your code is an absolute necessity -- you need to have *some* way to know it's doing what it should. +Everyone tests their code -- that's how you know it works. + +But when folks talk about "testing" -- what they really mean are automated tests. + Having your testing done in an automated way is really a good idea. -You've already seen a very basic testing strategy: putting some ``assert`` statements in the ``__name__ == "__main__"`` block. -You've written some tests using that strategy. +What is testing? 
+================ + +Code which runs your application in as close to a real environment as feasible and validates its behavior. + + +Terminology of testing +---------------------- + +- Unit tests +- Integration tests +- High level system tests +- Acceptance tests +- Black box / White box testing + + +"V" model and test levels +-------------------------- +.. image:: /_static/test_v_model.png + +Note that "coding" is at the bottom, and directly tied to unit-testing -- that's the place to start. + +About Unit Testing +------------------ -These tests were pretty basic, and a bit awkward in places (testing error -conditions in particular). +0. Tests can be fully automated. +1. Tests should be independent. +2. Tests do not run in order, which shouldn't matter, see point 1. +3. Test fixtures are available to do any setup / teardown needed for tests. +4. Test behavior should not be implementation dependent. +5. Mocking is available to fake stuff you may not want to run in your tests. + +This all applies regardless of your test framework. -.. centered:: **It gets better** Test Frameworks --------------- @@ -49,108 +80,9 @@ It is a bit verbose: you have to write classes & methods (And we haven't covered But you will see it used in others' code, so it's good to be familiar with it. And seeing how verbose it can be will help you appreciate other options. -So here's a bit of an introduction -- if the class stuff confuses you, don't worry about it -- you don't need to actually DO this yourself at this point. - - -Using ``unittest`` ------------------- - -To use ``unittest``, you need to write subclasses of the ``unittest.TestCase`` class (after importing the package, of course): - -.. code-block:: python - - # in test.py - import unittest - - class MyTests(unittest.TestCase): - - def test_tautology(self): - self.assertEqual(1, 1) - -Then you run the tests by using the ``main`` function from the ``unittest`` -module: - -.. code-block:: python - - # in test.py - if __name__ == '__main__': - unittest.main() - -``unittest.main()`` is called in the module where the tests are. Which means that they can be, but do not have to be, in the same file as your code. - -NOTE: tests can also be run by "test runners" for more features. - - -Testing Your Code ------------------ - -You can write your code in one file and test it from another -- and for all but the smallest projects, you want to do that. - -in ``my_mod.py``: - -.. code-block:: python - - def my_func(val1, val2): - return val1 * val2 - -in ``test_my_mod.py``: - -.. code-block:: python - - import unittest - from my_mod import my_func - - - class MyFuncTestCase(unittest.TestCase): - def test_my_func(self): - test_val1, test_val2 = 2, 3 - expected = 6 - actual = my_func(test_val1, test_val2) - self.assertEqual(expected, actual) - - if __name__ == '__main__': - unittest.main() - -So this is pretty straightforward, but it's kind of a lot of code for just one test, yes? - - -Advantages of ``unittest`` -------------------------- - -The ``unittest`` module is pretty full featured - -It comes with the standard Python distribution, no installation required. - -It provides a wide variety of assertions for testing many types of results. - -It allows for a "set up" and "tear down" work flow both before and after all tests and before and after each test. - -It's well known and well understood. - - -Disadvantages of ``unittest`` ----------------------------- - -It's Object Oriented, and quite "heavyweight". - - - modeled after Java's ``JUnit``.
-It uses the Framework design pattern, so knowing how to use the features means learning what to override. - -Needing to override means you have to be cautious. - -Test discovery is both inflexible and brittle. - -It doesn't really take advantage of Python's introspection capabilities: - There are explicit "assert" methods for each type of test - The available assertions are not the least bit complete - All the assertions really do is provide pretty printing of errors - -Testing for Exceptions is awkward - -Test discovery is limited +See :ref:`advanced_testing` for more on that. -And there is no built-in parameterized testing. +For now -- we're going to use pytest -- far simpler to get started, and advanced enough for the largest, most complex projects. Other Options diff --git a/source/modules/Testing_advanced.rst b/source/modules/Testing_advanced.rst index 8df9285..d899510 100644 --- a/source/modules/Testing_advanced.rst +++ b/source/modules/Testing_advanced.rst @@ -1,4 +1,3 @@ -:orphan: .. _advanced_testing: @@ -13,8 +12,7 @@ What is testing? ================ -Code which runs your application in as close to a real environment as -feasible and validates its behavior +Code which runs your application in as close to a real environment as feasible and validates its behavior. Terminology of testing @@ -46,15 +44,16 @@ Unit testing What should be tested? ---------------------- -The percentage of code which gets run in a test is known as the -"coverage". +The percentage of code which gets run in a test is known as the "coverage". -100% coverage is an ideal to strive for. But the decision on when and -what to test should take into account the volatility of the project. +**100% coverage is neither necessary nor sufficient** + +100% coverage is an ideal to strive for. +But the decision on when and what to test should take into account the volatility of the project, and the difficulty of testing some edge cases / error conditions. **NOTE** Even if every line of code is run during tests (100% coverage), -they may not be comprehensive! It is very hard to anticipate every weird -input some code may get. +they *will not* be comprehensive! +It is very hard to anticipate every weird input some code may get. Unit Testing tools ------------------ http://docs.python.org/3/library/unittest.html -- pytest, a test runner, and also an alternative to unittest, which you should be pretty familiar with now +- pytest, a test runner (it can also run unittest ``TestCase`` tests), and an alternative to unittest. http://pytest.org/latest/ https://docs.python.org/dev/library/unittest.mock.html -Note that while mock is in the ``unittest`` package, you do not need to be using ``unittest`` tests to use it. - +- coverage, a package for measuring test coverage. -About Unit Testing ------------------ +Note that while mock is in the ``unittest`` package, you do not need to be using ``unittest`` tests to use it. -1. Tests should be independent. -2. Tests do not run in order, which shouldn't matter, see point 1. -3. Test fixtures are available to do any setup / teardown needed for tests. -4. Test behavior not implementation dependent. -5. Mocking is available to fake stuff you may not want to run in your tests.
-This all applies regardless of your test framework unittest -------- @@ -213,17 +204,17 @@ Tests can also be organized into suites in the block -TestRunners: pytest and Nose2 ----------------------------- +TestRunner: pytest +------------------ -Nose2 is the new nose. Nose is no longer maintained, and directs users to nose2. +pytest is a very widely used and comprehensive test runner: it can auto-discover test cases, run them, and report the results. -Both pytest and Nose2 are test runners: they auto-discover test cases. +NOTE: Way back in the day, there was a test runner called "nose" -- there is some chance you may still see it used in older projects. It's similar to pytest, but with fewer features. -They will find tests for you so you can focus on writing tests, not +It will find tests for you so you can focus on writing tests, not maintaining test suites. -To find tests, pytest and nose look for modules (such as python files) +To find tests, pytest looks for modules (such as python files) whose names start with ‘test’. In those modules, they will load tests from all unittest.TestCase subclasses, as well as functions whose names start with ‘test’. So running your tests is as easy as .. code-block:: bash $ pytest or -.. code-block:: bash - - $ nose2 -http://nose2.readthedocs.org/en/latest/getting_started.html#running-tests - https://docs.pytest.org/en/latest/index.html -A number of projects use nose -- so you may encounter it, but we'll focus -on pytest for now. Fixtures: Setting up your tests for success ------------------------------------------- Test fixtures are a fixed baseline for tests to run from consistently, also known as test context. -Fixtures can (and should) be set up fresh before each test, once before each test -case, or before an entire test suite. +Fixtures can (and should) be set up fresh before each test, once before each test case, or before an entire test suite. Fixtures in unittest @@ -533,7 +514,7 @@ But when you use assertTrue:: self.assertTrue(math.isclose(4 * .15e-30, .45e-30)) AssertionError: False is not true -Not that helpful -- is it? I thikn we all already know that False is not true ;-) +Not that helpful -- is it? I think we all already know that False is not true ;-) ``pytest`` give you nice informative messages when tests fail -- without special asserts. Parameterized Tests =================== Often you want to run exactly the same tests, but with different outputs and inputs. -You can do this a really naive way, by putting multiple asserts into one test: +You can do this in a really naive way, by putting multiple asserts into one test: .. code-block:: python @@ -583,7 +564,7 @@ But talk about tedious!!! Unfortunately, ``unittest`` does not have a built-in way to solve this problem. There is a nifty library called parameterized, which does solve it (and they spell parameterize correctly). -It works with nose, unittest, and pytest. +It works with unittest and pytest. https://pypi.python.org/pypi/parameterized @@ -641,16 +622,17 @@ Keep in mind that 100% coverage does **NOT** mean that your code is *fully* test But it's a good start. +And low coverage means that your code is very sparsely tested -- not great!
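+
+A minimal (hypothetical) sketch of why 100% coverage is not sufficient -- the ``mean`` function and test below are made up just for illustration, not part of the exercises:
+
+.. code-block:: python
+
+    def mean(values):
+        # BUG: raises ZeroDivisionError for an empty list
+        return sum(values) / len(values)
+
+    def test_mean():
+        # this single test runs every line of mean() -- 100% coverage --
+        # yet the empty-list edge case is never exercised
+        assert mean([1, 2, 3]) == 2
+
+A coverage report would show 100% for this module, but ``mean([])`` still blows up.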
+ The coverage tool ----------------- -``coverage.py`` is a tool (written by Ned Batchelder) for checking code testing -coverage in python: +``coverage.py`` is a tool (written by Ned Batchelder) for checking code testing coverage in python: https://coverage.readthedocs.io -It can be installed with ``pip``: +It can be installed with ``pip`` (or conda): .. code-block:: bash @@ -712,6 +694,8 @@ Using coverage with pytest There is a plug-in for pytest that will run coverage for you when you run your tests: +https://pytest-cov.readthedocs.io/ + .. code-block:: bash $ pip install pytest-cov # now it can be used $ pytest --cov code_module test_module.py -https://pypi.python.org/pypi/pytest-cov +or + +.. code-block:: bash + + $ conda install pytest-cov There are a number of ways to invoke it and get different reports: To get a nifty html report: $ pytest --cov code_module --cov-report html test_module.py +Or you can run ``coverage html`` after running the tests. + Doctests ======== @@ -775,7 +765,8 @@ Well worth checking out -- and you can have Sphinx run your doctests for you. My Take: -------- -doctests are really cool -- but they are more a way to test your documentation, than a way to test your code. Which is great -- you can have examples in your docs, and know that they are still correct. +doctests are really cool -- but they are more a way to test your documentation than a way to test your code. +Which is great -- you can have examples in your docs, and know that they are still correct. Test Driven Development (TDD) diff --git a/source/topics/07-unit_testing/index.rst b/source/topics/07-unit_testing/index.rst index f23f4ee..3404595 100644 --- a/source/topics/07-unit_testing/index.rst +++ b/source/topics/07-unit_testing/index.rst @@ -7,6 +7,7 @@ Unit Testing ../../modules/Testing ../../modules/TestDrivenDevelopment + ../../modules/Testing_advanced .. toctree:: :caption: Activities