diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 0000000000..9be06b93ca
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,2 @@
+[run]
+source = redis
diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000000..7b9bc9b2df
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,5 @@
+**/__pycache__
+**/*.pyc
+.tox
+.coverage
+.coverage.*
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
new file mode 100644
index 0000000000..7323c14392
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE.md
@@ -0,0 +1,8 @@
+Thanks for wanting to report an issue you've found in redis-py. Please delete this text and fill in the template below.
+It is of course not always possible to reduce your code to a small test case, but it's highly appreciated to have as much data as possible. Thank you!
+
+**Version**: What redis-py and what redis version is the issue happening on?
+
+**Platform**: What platform / version? (For example Python 3.5.1 on Windows 7 / Ubuntu 15.10 / Azure)
+
+**Description**: Description of your issue, stack traces from errors and code that reproduces the issue
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 0000000000..a3b0b0e4e7
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,17 @@
+### Pull Request check-list
+
+_Please make sure to review and check all of these items:_
+
+- [ ] Does `$ tox` pass with this change (including linting)?
+- [ ] Do the CI tests pass with this change (enable it first in your forked repo and wait for the GitHub Actions build to finish)?
+- [ ] Is the new or changed code fully tested?
+- [ ] Is a documentation update included (if this change modifies existing APIs, or introduces new ones)?
+- [ ] Is there an example added to the examples folder (if applicable)?
+- [ ] Was the change added to the CHANGES file?
+
+_NOTE: these things are not required to open a PR and can be done
+afterwards / while the PR is open._
+
+### Description of change
+
+_Please provide a description of the change here._
diff --git a/.github/release-drafter-config.yml b/.github/release-drafter-config.yml
new file mode 100644
index 0000000000..9ccb28aca4
--- /dev/null
+++ b/.github/release-drafter-config.yml
@@ -0,0 +1,48 @@
+name-template: '$NEXT_MINOR_VERSION'
+tag-template: 'v$NEXT_MINOR_VERSION'
+autolabeler:
+ - label: 'maintenance'
+ files:
+ - '*.md'
+ - '.github/*'
+ - label: 'bug'
+ branch:
+ - '/bug-.+'
+ - label: 'maintenance'
+ branch:
+ - '/maintenance-.+'
+ - label: 'feature'
+ branch:
+ - '/feature-.+'
+categories:
+ - title: 'Breaking Changes'
+ labels:
+ - 'breakingchange'
+ - title: 'đ§Ș Experimental Features'
+ labels:
+ - 'experimental'
+ - title: 'đ New Features'
+ labels:
+ - 'feature'
+ - 'enhancement'
+ - title: 'đ Bug Fixes'
+ labels:
+ - 'fix'
+ - 'bugfix'
+ - 'bug'
+ - 'BUG'
+ - title: 'đ§° Maintenance'
+ label: 'maintenance'
+change-template: '- $TITLE (#$NUMBER)'
+exclude-labels:
+ - 'skip-changelog'
+template: |
+ # Changes
+
+ $CHANGES
+
+ ## Contributors
+ We'd like to thank all the contributors who worked on this release!
+
+ $CONTRIBUTORS
+
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
new file mode 100644
index 0000000000..e82e7e1530
--- /dev/null
+++ b/.github/workflows/codeql-analysis.yml
@@ -0,0 +1,68 @@
+# For most projects, this workflow file will not need changing; you simply need
+# to commit it to your repository.
+#
+# You may wish to alter this file to override the set of languages analyzed,
+# or to provide custom queries or build logic.
+#
+# ******** NOTE ********
+# We have attempted to detect the languages in your repository. Please check
+# the `language` matrix defined below to confirm you have the correct set of
+# supported CodeQL languages.
+#
+name: "CodeQL"
+
+on:
+ push:
+ branches: [ master ]
+ pull_request:
+ # The branches below must be a subset of the branches above
+ branches: [ master ]
+
+jobs:
+ analyze:
+ name: Analyze
+ runs-on: ubuntu-latest
+ permissions:
+ actions: read
+ contents: read
+ security-events: write
+
+ strategy:
+ fail-fast: false
+ matrix:
+ language: [ 'python' ]
+ # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
+ # Learn more about CodeQL language support at https://git.io/codeql-language-support
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v3
+
+ # Initializes the CodeQL tools for scanning.
+ - name: Initialize CodeQL
+ uses: github/codeql-action/init@v2
+ with:
+ languages: ${{ matrix.language }}
+ # If you wish to specify custom queries, you can do so here or in a config file.
+ # By default, queries listed here will override any specified in a config file.
+ # Prefix the list here with "+" to use these queries and those in the config file.
+ # queries: ./path/to/local/query, your-org/your-repo/queries@main
+
+ # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
+ # If this step fails, then you should remove it and run the build manually (see below)
+ - name: Autobuild
+ uses: github/codeql-action/autobuild@v2
+
+ # âčïž Command-line programs to run using the OS shell.
+ # đ https://git.io/JvXDl
+
+ # âïž If the Autobuild fails above, remove it and uncomment the following three lines
+ # and modify them (or add more) to build your code if your project
+ # uses a compiled language
+
+ #- run: |
+ # make bootstrap
+ # make release
+
+ - name: Perform CodeQL Analysis
+ uses: github/codeql-action/analyze@v2
diff --git a/.github/workflows/install_and_test.sh b/.github/workflows/install_and_test.sh
new file mode 100755
index 0000000000..33a1edb1e7
--- /dev/null
+++ b/.github/workflows/install_and_test.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+set -e
+
+SUFFIX=$1
+if [ -z "${SUFFIX}" ]; then
+    echo "Supply a valid Python package extension such as whl or tar.gz. Exiting."
+ exit 3
+fi
+
+script=`pwd`/${BASH_SOURCE[0]}
+HERE=`dirname ${script}`
+ROOT=`realpath ${HERE}/../..`
+
+cd ${ROOT}
+DESTENV=${ROOT}/.venvforinstall
+if [ -d ${DESTENV} ]; then
+ rm -rf ${DESTENV}
+fi
+python -m venv ${DESTENV}
+source ${DESTENV}/bin/activate
+pip install --upgrade --quiet pip
+pip install --quiet -r dev_requirements.txt
+invoke devenv
+invoke package
+
+# find packages
+PKG=`ls ${ROOT}/dist/*.${SUFFIX}`
+ls -l ${PKG}
+
+TESTDIR=${ROOT}/STAGETESTS
+if [ -d ${TESTDIR} ]; then
+ rm -rf ${TESTDIR}
+fi
+mkdir ${TESTDIR}
+cp -R ${ROOT}/tests ${TESTDIR}/tests
+cd ${TESTDIR}
+
+# install, run tests
+pip install ${PKG}
+# Redis tests
+pytest -m 'not onlycluster'
+# RedisCluster tests
+CLUSTER_URL="redis://localhost:16379/0"
+pytest -m 'not onlynoncluster and not redismod and not ssl' --redis-url=${CLUSTER_URL}
diff --git a/.github/workflows/integration.yaml b/.github/workflows/integration.yaml
new file mode 100644
index 0000000000..0f9db8fb1a
--- /dev/null
+++ b/.github/workflows/integration.yaml
@@ -0,0 +1,121 @@
+name: CI
+
+on:
+ push:
+ paths-ignore:
+ - 'docs/**'
+ - '**/*.rst'
+ - '**/*.md'
+ branches:
+ - master
+ - '[0-9].[0-9]'
+ pull_request:
+ branches:
+ - master
+ - '[0-9].[0-9]'
+ schedule:
+ - cron: '0 1 * * *' # nightly build
+
+permissions:
+ contents: read # to fetch code (actions/checkout)
+
+jobs:
+
+ dependency-audit:
+ name: Dependency audit
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ - uses: pypa/gh-action-pip-audit@v1.0.0
+ with:
+ inputs: requirements.txt dev_requirements.txt
+ ignore-vulns: |
+ GHSA-w596-4wvx-j9j6 # subversion related git pull, dependency for pytest. There is no impact here.
+
+ lint:
+ name: Code linters
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ - uses: actions/setup-python@v4
+ with:
+ python-version: 3.9
+ cache: 'pip'
+ - name: run code linters
+ run: |
+ pip install -r dev_requirements.txt
+ invoke linters
+
+ run-tests:
+ runs-on: ubuntu-latest
+ timeout-minutes: 30
+ strategy:
+ max-parallel: 15
+ matrix:
+ python-version: ['3.7', '3.8', '3.9', '3.10', '3.11', 'pypy-3.7', 'pypy-3.8', 'pypy-3.9']
+ test-type: ['standalone', 'cluster']
+ connection-type: ['hiredis', 'plain']
+ env:
+ ACTIONS_ALLOW_UNSECURE_COMMANDS: true
+ name: Python ${{ matrix.python-version }} ${{matrix.test-type}}-${{matrix.connection-type}} tests
+ steps:
+ - uses: actions/checkout@v3
+ - uses: actions/setup-python@v4
+ with:
+ python-version: ${{ matrix.python-version }}
+ cache: 'pip'
+ - name: run tests
+ run: |
+ pip install -U setuptools wheel
+ pip install -r dev_requirements.txt
+ tox -e ${{matrix.test-type}}-${{matrix.connection-type}}
+ - uses: actions/upload-artifact@v2
+ if: success() || failure()
+ with:
+ name: pytest-results-${{matrix.test-type}}
+ path: '${{matrix.test-type}}*results.xml'
+ - name: Upload codecov coverage
+ uses: codecov/codecov-action@v3
+ with:
+ fail_ci_if_error: false
+ # - name: View Test Results
+ # uses: dorny/test-reporter@v1
+ # if: success() || failure()
+ # with:
+ # name: Test Results ${{matrix.python-version}} ${{matrix.test-type}}-${{matrix.connection-type}}
+ # path: '${{matrix.test-type}}*results.xml'
+ # reporter: java-junit
+ # list-suites: failed
+ # list-tests: failed
+ # max-annotations: 10
+
+ build_and_test_package:
+ name: Validate building and installing the package
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ extension: ['tar.gz', 'whl']
+ steps:
+ - uses: actions/checkout@v3
+ - uses: actions/setup-python@v4
+ with:
+ python-version: 3.9
+ - name: Run installed unit tests
+ run: |
+ bash .github/workflows/install_and_test.sh ${{ matrix.extension }}
+
+ install_package_from_commit:
+ name: Install package from commit hash
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ python-version: ['3.7', '3.8', '3.9', '3.10', '3.11', 'pypy-3.7', 'pypy-3.8', 'pypy-3.9']
+ steps:
+ - uses: actions/checkout@v3
+ - uses: actions/setup-python@v4
+ with:
+ python-version: ${{ matrix.python-version }}
+ cache: 'pip'
+ - name: install from pip
+ run: |
+ pip install --quiet git+${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}.git@${GITHUB_SHA}
diff --git a/.github/workflows/pypi-publish.yaml b/.github/workflows/pypi-publish.yaml
new file mode 100644
index 0000000000..50332c1995
--- /dev/null
+++ b/.github/workflows/pypi-publish.yaml
@@ -0,0 +1,34 @@
+name: Publish tag to PyPI
+
+on:
+ release:
+ types: [published]
+
+permissions:
+ contents: read # to fetch code (actions/checkout)
+
+jobs:
+
+ build_and_package:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ - name: install python
+ uses: actions/setup-python@v4
+ with:
+ python-version: 3.9
+ - name: Install dev tools
+ run: |
+ pip install -r dev_requirements.txt
+ pip install twine wheel
+
+ - name: Build package
+ run: |
+ python setup.py build
+ python setup.py sdist bdist_wheel
+
+  - name: Publish to PyPI
+ uses: pypa/gh-action-pypi-publish@release/v1
+ with:
+ user: __token__
+ password: ${{ secrets.PYPI_API_TOKEN }}
diff --git a/.github/workflows/release-drafter.yml b/.github/workflows/release-drafter.yml
new file mode 100644
index 0000000000..eebb3e678b
--- /dev/null
+++ b/.github/workflows/release-drafter.yml
@@ -0,0 +1,24 @@
+name: Release Drafter
+
+on:
+ push:
+ # branches to consider in the event; optional, defaults to all
+ branches:
+ - master
+
+permissions: {}
+jobs:
+ update_release_draft:
+ permissions:
+ pull-requests: write # to add label to PR (release-drafter/release-drafter)
+ contents: write # to create a github release (release-drafter/release-drafter)
+
+ runs-on: ubuntu-latest
+ steps:
+ # Drafts your next Release notes as Pull Requests are merged into "master"
+ - uses: release-drafter/release-drafter@v5
+ with:
+ # (Optional) specify config name to use, relative to .github/. Default: release-drafter.yml
+ config-name: release-drafter-config.yml
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/stale-issues.yml b/.github/workflows/stale-issues.yml
new file mode 100644
index 0000000000..32fd9e8179
--- /dev/null
+++ b/.github/workflows/stale-issues.yml
@@ -0,0 +1,25 @@
+name: "Close stale issues"
+on:
+ schedule:
+ - cron: "0 0 * * *"
+
+permissions: {}
+jobs:
+ stale:
+ permissions:
+ issues: write # to close stale issues (actions/stale)
+ pull-requests: write # to close stale PRs (actions/stale)
+
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/stale@v3
+ with:
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
+ stale-issue-message: 'This issue is marked stale. It will be closed in 30 days if it is not updated.'
+ stale-pr-message: 'This pull request is marked stale. It will be closed in 30 days if it is not updated.'
+ days-before-stale: 365
+ days-before-close: 30
+ stale-issue-label: "Stale"
+ stale-pr-label: "Stale"
+ operations-per-run: 10
+ remove-stale-when-updated: true
diff --git a/.gitignore b/.gitignore
index 05578d77dd..b392a2d748 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,3 +7,14 @@ dump.rdb
_build
vagrant/.vagrant
.python-version
+.cache
+.eggs
+.idea
+.coverage
+env
+venv
+coverage.xml
+.venv*
+*.xml
+.coverage*
+docker/stunnel/keys
diff --git a/.mypy.ini b/.mypy.ini
new file mode 100644
index 0000000000..942574e0f3
--- /dev/null
+++ b/.mypy.ini
@@ -0,0 +1,24 @@
+[mypy]
+#, docs/examples, tests
+files = redis
+check_untyped_defs = True
+follow_imports_for_stubs = True
+#disallow_any_decorated = True
+disallow_subclassing_any = True
+#disallow_untyped_calls = True
+disallow_untyped_decorators = True
+#disallow_untyped_defs = True
+implicit_reexport = False
+no_implicit_optional = True
+show_error_codes = True
+strict_equality = True
+warn_incomplete_stub = True
+warn_redundant_casts = True
+warn_unreachable = True
+warn_unused_ignores = True
+disallow_any_unimported = True
+#warn_return_any = True
+
+[mypy-redis.asyncio.lock]
+# TODO: Remove once the locks module has been rewritten
+ignore_errors = True
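
A rough illustration (hypothetical code, not from this repository) of what two of these flags enforce: `no_implicit_optional = True` rejects `None` defaults without an explicit `Optional`, and `check_untyped_defs = True` type-checks the bodies of unannotated functions.

```python
from typing import Optional


# no_implicit_optional = True: a None default needs an explicit Optional.
def get_client_name(name: Optional[str] = None) -> str:  # accepted
    return name or "redis"


# def get_client_name(name: str = None) -> str: ...  # rejected by mypy


# check_untyped_defs = True: bodies of unannotated functions are checked too.
def build_label():
    return "timeout:" + 5  # mypy error: unsupported operand types (str + int)
```
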
diff --git a/.readthedocs.yml b/.readthedocs.yml
new file mode 100644
index 0000000000..800cb14816
--- /dev/null
+++ b/.readthedocs.yml
@@ -0,0 +1,14 @@
+version: 2
+
+python:
+ install:
+ - requirements: ./docs/requirements.txt
+ - requirements: requirements.txt
+
+build:
+ os: ubuntu-20.04
+ tools:
+ python: "3.9"
+
+sphinx:
+ configuration: docs/conf.py
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index cf38f4b782..0000000000
--- a/.travis.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-language: python
-python:
- - "3.3"
- - "3.2"
- - "2.7"
- - "2.6"
-services:
- - redis-server
-env:
- - TEST_HIREDIS=0
- - TEST_HIREDIS=1
-install:
- - pip install -e .
- - "if [[ $TEST_PEP8 == '1' ]]; then pip install pep8; fi"
- - "if [[ $TEST_HIREDIS == '1' ]]; then pip install hiredis; fi"
-script: "if [[ $TEST_PEP8 == '1' ]]; then pep8 --repeat --show-source --exclude=.venv,.tox,dist,docs,build,*.egg .; else python setup.py test; fi"
-matrix:
- include:
- - python: "2.7"
- env: TEST_PEP8=1
- - python: "3.4"
- env: TEST_PEP8=1
diff --git a/CHANGES b/CHANGES
index 693593e54b..b0744c6038 100644
--- a/CHANGES
+++ b/CHANGES
@@ -1,3 +1,667 @@
+ * Allow data to drain from async PythonParser when reading during a disconnect()
+ * Use asyncio.timeout() instead of async_timeout.timeout() for python >= 3.11 (#2602)
+ * Add test and fix async HiredisParser when reading during a disconnect() (#2349)
+ * Use hiredis-py pack_command if available.
+ * Support `.unlink()` in ClusterPipeline
+ * Simplify synchronous SocketBuffer state management
+ * Fix string cleanse in Redis Graph
+ * Make PythonParser resumable in case of error (#2510)
+ * Add `timeout=None` in `SentinelConnectionManager.read_response`
+ * Documentation fix: password protected socket connection (#2374)
+ * Allow `timeout=None` in `PubSub.get_message()` to wait forever
+ * Add `nowait` flag to `asyncio.Connection.disconnect()`
+ * Update README.md links
+ * Fix timezone handling for datetime to unixtime conversions
+ * Fix start_id type for XAUTOCLAIM
+ * Remove verbose logging from cluster.py
+ * Add retry mechanism to async version of Connection
+ * Compare commands case-insensitively in the asyncio command parser
+ * Allow negative `retries` for `Retry` class to retry forever (see the usage sketch after this section)
+ * Add `items` parameter to `hset` signature
+ * Create codeql-analysis.yml (#1988). Thanks @chayim
+ * Add limited support for Lua scripting with RedisCluster
+ * Implement `.lock()` method on RedisCluster
+ * Fix cursor returned by SCAN for RedisCluster & change default target to PRIMARIES
+ * Fix scan_iter for RedisCluster
+ * Remove verbose logging when initializing ClusterPubSub, ClusterPipeline or RedisCluster
+ * Fix broken connection writer lock-up for asyncio (#2065)
+ * Fix auth bug when provided with no username (#2086)
+ * Fix missing ClusterPipeline._lock (#2189)
+ * Added dynamic_startup_nodes configuration to RedisCluster
+ * Fix reusing the old nodes' connections when cluster topology refresh is being done
+ * Fix RedisCluster to immediately raise AuthenticationError without a retry
+ * Fix ClusterPipeline not handling ConnectionError for dead hosts (#2225)
+ * Remove compatibility code for old versions of Hiredis, drop Packaging dependency
+ * The `deprecated` library is no longer a dependency
+ * Failover handling improvements for RedisCluster and Async RedisCluster (#2377)
+ * Fixed "cannot pickle '_thread.lock' object" bug (#2354, #2297)
+ * Added CredentialsProvider class to support password rotation
+ * Enable Lock for asyncio cluster mode
+ * Fix bug where Sentinel.execute_command did not execute across the entire Sentinel cluster (#2458)
+ * Added a replacement for the default cluster node in the event of failure (#2463)
+ * Fix for Unhandled exception related to self.host with unix socket (#2496)
+
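
A minimal usage sketch for the retry entries above, assuming the `Retry` and `ExponentialBackoff` helpers that redis-py 4.x ships (signatures may differ between versions); host and port are illustrative:

```python
import redis
from redis.backoff import ExponentialBackoff
from redis.retry import Retry

# retries=3 caps the attempts; per the entry above, retries=-1 retries forever.
retry = Retry(ExponentialBackoff(), retries=3)

r = redis.Redis(host="localhost", port=6379, retry=retry, retry_on_timeout=True)
r.ping()  # transient timeouts are retried with exponential backoff
```
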
+* 4.1.3 (Feb 8, 2022)
+ * Fix flushdb and flushall (#1926)
+ * Add redis5 and redis4 dockers (#1871)
+ * Change json.clear test multi to be up to date with redisjson (#1922)
+ * Fixing volume for unstable_cluster docker (#1914)
+ * Update changes file with changes since 4.0.0-beta2 (#1915)
+* 4.1.2 (Jan 27, 2022)
+ * Invalid OCSP certificates should raise ConnectionError on failed validation (#1907)
+ * Added retry mechanism on socket timeouts when connecting to the server (#1895)
+ * LMOVE, BLMOVE return incorrect responses (#1906)
+ * Fixing AttributeError in UnixDomainSocketConnection (#1903)
+ * Fixing TypeError in GraphCommands.explain (#1901)
+ * For tests, increasing wait time for the cluster (#1908)
+ * Increased pubsub's wait_for_messages timeout to prevent flaky tests (#1893)
+ * README code snippets formatted to highlight properly (#1888)
+ * Fix link in the main page (#1897)
+ * Documentation fixes: JSON Example, SSL Connection Examples, RTD version (#1887)
+ * Direct link to readthedocs (#1885)
+* 4.1.1 (Jan 17, 2022)
+ * Add retries to connections in Sentinel Pools (#1879)
+ * OCSP Stapling Support (#1873)
+ * Define incr/decr as aliases of incrby/decrby (#1874)
+ * FT.CREATE - support MAXTEXTFIELDS, TEMPORARY, NOHL, NOFREQS, SKIPINITIALSCAN (#1847)
+ * Timeseries docs fix (#1877)
+ * get_connection: catch OSError too (#1832)
+ * Set keys var otherwise variable not created (#1853)
+ * Clusters should optionally require full slot coverage (#1845)
+ * Triple quote docstrings in client.py PEP 257 (#1876)
+ * syncing requirements (#1870)
+ * Typo and typing in GraphCommands documentation (#1855)
+ * Allowing poetry and redis-py to install together (#1854)
+ * setup.py: Add project_urls for PyPI (#1867)
+ * Support test with redis unstable docker (#1850)
+ * Connection examples (#1835)
+ * Documentation cleanup (#1841)
+* 4.1.0 (Dec 26, 2021)
+ * OCSP stapling support (#1820)
+ * Support for SELECT (#1825)
+ * Support for specifying error types with retry (#1817)
+ * Support for RESET command since Redis 6.2.0 (#1824)
+ * Support CLIENT TRACKING (#1612)
+ * Support WRITE in CLIENT PAUSE (#1549)
+ * JSON set_file and set_path support (#1818)
+ * Allow ssl_ca_path with rediss:// urls (#1814)
+ * Support for password-encrypted SSL private keys (#1782)
+ * Support SYNC and PSYNC (#1741)
+ * Retry on error exception and timeout fixes (#1821)
+ * Fixing read race condition during pubsub (#1737)
+ * Fixing exception in listen (#1823)
+ * Fixed MovedError, and stopped iterating through startup nodes when slots are fully covered (#1819)
+ * Socket not closing after server disconnect (#1797)
+ * Single sourcing the package version (#1791)
+ * Ensure redis_connect_func is set on uds connection (#1794)
+ * STRALGO - Skip for redis versions greater than 7.0.0 (#1831)
+ * Documentation updates (#1822)
+ * Add CI action to install package from repository commit hash (#1781) (#1790)
+ * Fix link in lmove docstring (#1793)
+ * Disabling JSON.DEBUG tests (#1787)
+ * Migrated targeted nodes to kwargs in Cluster Mode (#1762)
+ * Added support for MONITOR in clusters (#1756)
+ * Adding ROLE Command (#1610)
+ * Integrate RedisBloom support (#1683)
+ * Adding RedisGraph support (#1556)
+ * Allow overriding connection class via keyword arguments (#1752)
+ * Aggregation LOAD * support for RediSearch (#1735)
+ * Adding cluster, bloom, and graph docs (#1779)
+ * Add packaging to setup_requires, and use >= to play nice to setup.py (fixes #1625) (#1780)
+ * Fixing the license link in the readme (#1778)
+ * Removing distutils from tests (#1773)
+ * Fix cluster ACL tests (#1774)
+ * Improved RedisCluster's reinitialize_steps and documentation (#1765)
+ * Added black and isort (#1734)
+ * Link Documents for all module commands (#1711)
+ * Pyupgrade + flynt + f-strings (#1759)
+ * Remove unused aggregation subclasses in RediSearch (#1754)
+ * Adding RedisCluster client to support Redis Cluster Mode (#1660)
+ * Support RediSearch FT.PROFILE command (#1727)
+ * Adding support for non-decodable commands (#1731)
+ * COMMAND GETKEYS support (#1738)
+ * RedisJSON 2.0.4 behaviour support (#1747)
+ * Removing deprecated distutils (PEP 632) (#1730)
+ * Updating PR template (#1745)
+ * Removing duplication of Script class (#1751)
+ * Splitting documentation for read the docs (#1743)
+ * Improve code coverage for aggregation tests (#1713)
+ * Fixing COMMAND GETKEYS tests (#1750)
+ * GitHub release improvements (#1684)
+* 4.0.2 (Nov 22, 2021)
+ * Restoring Sentinel commands to redis client (#1723)
+ * Better removal of hiredis warning (#1726)
+ * Adding links to redis documents in function calls (#1719)
+* 4.0.1 (Nov 17, 2021)
+ * Removing command on initial connections (#1722)
+ * Removing hiredis warning when not installed (#1721)
+* 4.0.0 (Nov 15, 2021)
+ * FT.EXPLAINCLI intentionally raising NotImplementedError
+ * Restoring ZRANGE desc for Redis < 6.2.0 (#1697)
+ * Response parsing occasionally fails to parse floats (#1692)
+ * Re-enabling read-the-docs (#1707)
+ * Call HSET after FT.CREATE to avoid keyspace scan (#1706)
+ * Unit tests fixes for compatibility (#1703)
+ * Improve documentation about Locks (#1701)
+ * Fixes to allow --redis-url to pass through all tests (#1700)
+ * Fix unit tests running against Redis 4.0.0 (#1699)
+ * Search alias test fix (#1695)
+ * Adding RediSearch/RedisJSON tests (#1691)
+ * Updating codecov rules (#1689)
+ * Tests to validate custom JSON decoders (#1681)
+ * Added breaking icon to release drafter (#1702)
+ * Removing dependency on six (#1676)
+ * Re-enable pipeline support for JSON and TimeSeries (#1674)
+ * Export Sentinel, and SSL like other classes (#1671)
+ * Restore zrange functionality for older versions of Redis (#1670)
+ * Fixed garbage collection deadlock (#1578)
+ * Tests to validate built python packages (#1678)
+ * Sleep for flaky search test (#1680)
+ * Test function renames, to match standards (#1679)
+ * Docstring improvements for Redis class (#1675)
+ * Fix georadius tests (#1672)
+ * Improvements to JSON coverage (#1666)
+ * Add python_requires setuptools check for python > 3.6 (#1656)
+ * SMISMEMBER support (#1667)
+ * Exposing the module version in loaded_modules (#1648)
+ * RedisTimeSeries support (#1652)
+ * Support for json multipath ($) (#1663)
+ * Added boolean parsing to PEXPIRE and PEXPIREAT (#1665)
+ * Adding vulture for static analysis (#1655)
+ * Starting to clean the docs (#1657)
+ * Update README.md (#1654)
+ * Adding description format for package (#1651)
+ * Publish to pypi as releases are generated with the release drafter (#1647)
+ * Restore actions to prs (#1653)
+ * Fixing the package to include commands (#1649)
+ * Re-enabling codecov as part of CI process (#1646)
+ * Adding support for redisearch (#1640) Thanks @chayim
+ * redisjson support (#1636) Thanks @chayim
+ * Sentinel: Add SentinelManagedSSLConnection (#1419) Thanks @AbdealiJK
+ * Enable floating parameters in SET (ex and px) (#1635) Thanks @AvitalFineRedis
+ * Add warning when hiredis not installed. Recommend installation. (#1621) Thanks @adiamzn
+ * Raising NotImplementedError for SCRIPT DEBUG and DEBUG SEGFAULT (#1624) Thanks @chayim
+ * CLIENT REDIR command support (#1623) Thanks @chayim
+ * REPLICAOF command implementation (#1622) Thanks @chayim
+ * Add support to NX XX and CH to GEOADD (#1605) Thanks @AvitalFineRedis
+ * Add support to ZRANGE and ZRANGESTORE parameters (#1603) Thanks @AvitalFineRedis
+ * Pre 6.2 redis should default to None for script flush (#1641) Thanks @chayim
+ * Add FULL option to XINFO SUMMARY (#1638) Thanks @agusdmb
+ * Geosearch test should use any=True (#1594) Thanks @Andrew-Chen-Wang
+ * Removing packaging dependency (#1626) Thanks @chayim
+ * Fix client_kill_filter docs for skimpy (#1596) Thanks @Andrew-Chen-Wang
+ * Normalize minid and maxlen docs (#1593) Thanks @Andrew-Chen-Wang
+ * Update docs for multiple usernames for ACL DELUSER (#1595) Thanks @Andrew-Chen-Wang
+ * Fix grammar of get param in set command (#1588) Thanks @Andrew-Chen-Wang
+ * Fix docs for client_kill_filter (#1584) Thanks @Andrew-Chen-Wang
+ * Convert README & CONTRIBUTING from rst to md (#1633) Thanks @davidylee
+ * Test BYLEX param in zrangestore (#1634) Thanks @AvitalFineRedis
+ * Tox integrations with invoke and docker (#1632) Thanks @chayim
+ * Adding the release drafter to help simplify release notes (#1618). Thanks @chayim
+ * BACKWARDS INCOMPATIBLE: Removed support for end of life Python 2.7. #1318
+ * BACKWARDS INCOMPATIBLE: All values within Redis URLs are unquoted via
+ urllib.parse.unquote. Prior versions of redis-py supported this by
+ specifying the ``decode_components`` flag to the ``from_url`` functions.
+ This is now done by default and cannot be disabled. #589
+ * POTENTIALLY INCOMPATIBLE: Redis commands were moved into a mixin
+ (see commands.py). Anyone importing ``redis.client`` to access commands
+ directly should import ``redis.commands``. #1534, #1550
+ * Removed technical debt on REDIS_6_VERSION placeholder. Thanks @chayim #1582.
+ * Various docs fixes. Thanks @Andrew-Chen-Wang #1585, #1586.
+ * Support for LOLWUT command, available since Redis 5.0.0.
+ Thanks @brainix #1568.
+ * Added support for CLIENT REPLY, available in Redis 3.2.0.
+ Thanks @chayim #1581.
+ * Support for Auto-reconnect PubSub on get_message. Thanks @luhn #1574.
+ * Fix RST syntax error in README. Thanks @JanCBrammer #1451.
+ * IDLETIME and FREQ support for RESTORE. Thanks @chayim #1580.
+ * Supporting args with MODULE LOAD. Thanks @chayim #1579.
+ * Updating RedisLabs with Redis. Thanks @gkorland #1575.
+ * Added support for ASYNC to SCRIPT FLUSH available in Redis 6.2.0.
+ Thanks @chayim. #1567
+ * Added CLIENT LIST fix to support multiple client ids available in
+ Redis 2.8.12. Thanks @chayim #1563.
+ * Added DISCARD support for pipelines available in Redis 2.0.0.
+ Thanks @chayim #1565.
+ * Added ACL DELUSER support for deleting lists of users available in
+ Redis 6.2.0. Thanks @chayim. #1562
+ * Added CLIENT TRACKINFO support available in Redis 6.2.0.
+ Thanks @chayim. #1560
+ * Added GEOSEARCH and GEOSEARCHSTORE support available in Redis 6.2.0.
+   Thanks @AvitalFineRedis. #1526
+ * Added LPUSHX support for lists available in Redis 4.0.0.
+ Thanks @chayim. #1559
+ * Added support for QUIT available in Redis 1.0.0.
+ Thanks @chayim. #1558
+ * Added support for COMMAND COUNT available in Redis 2.8.13.
+ Thanks @chayim. #1554.
+ * Added CREATECONSUMER support for XGROUP available in Redis 6.2.0.
+ Thanks @AvitalFineRedis. #1553
+ * Including slowlog complexity in INFO if available.
+ Thanks @ian28223 #1489.
+ * Added support for STRALGO available in Redis 6.0.0.
+ Thanks @AvitalFineRedis. #1528
+ * Added support for ZMSCORE available in Redis 6.2.0.
+ Thanks @2014BDuck and @jiekun.zhu. #1437
+ * Support MINID and LIMIT on XADD available in Redis 6.2.0.
+ Thanks @AvitalFineRedis. #1548
+ * Added sentinel commands FLUSHCONFIG, CKQUORUM, FAILOVER, and RESET
+ available in Redis 2.8.12.
+ Thanks @otherpirate. #834
+ * Migrated Version instead of StrictVersion for Python 3.10.
+ Thanks @tirkarthi. #1552
+ * Added retry mechanism with backoff. Thanks @nbraun-amazon. #1494
+ * Migrated commands to a mixin. Thanks @chayim. #1534
+ * Added support for ZUNION, available in Redis 6.2.0. Thanks
+ @AvitalFineRedis. #1522
+ * Added support for CLIENT LIST with ID, available in Redis 6.2.0.
+ Thanks @chayim. #1505
+ * Added support for MINID and LIMIT with xtrim, available in Redis 6.2.0.
+ Thanks @chayim. #1508
+ * Implemented LMOVE and BLMOVE commands, available in Redis 6.2.0.
+ Thanks @chayim. #1504
+ * Added GET argument to SET command, available in Redis 6.2.0.
+ Thanks @2014BDuck. #1412
+ * Documentation fixes. Thanks @enjoy-binbin @jonher937. #1496 #1532
+ * Added support for XAUTOCLAIM, available in Redis 6.2.0.
+ Thanks @AvitalFineRedis. #1529
+ * Added IDLE support for XPENDING, available in Redis 6.2.0.
+ Thanks @AvitalFineRedis. #1523
+ * Add a count parameter to lpop/rpop, available in Redis 6.2.0.
+ Thanks @wavenator. #1487
+ * Added a (pypy) trove classifier for Python 3.9.
+ Thanks @D3X. #1535
+ * Added ZINTER support, available in Redis 6.2.0.
+ Thanks @AvitalFineRedis. #1520
+ * Added ZDIFF and ZDIFFSTORE support, available in Redis 6.2.0.
+ Thanks @AvitalFineRedis. #1518
+ * Added ZRANGESTORE support, available in Redis 6.2.0.
+ Thanks @AvitalFineRedis. #1521
+ * Added LT and GT support for ZADD, available in Redis 6.2.0.
+ Thanks @chayim. #1509
+ * Added ZRANDMEMBER support, available in Redis 6.2.0.
+ Thanks @AvitalFineRedis. #1519
+ * Added GETDEL support, available in Redis 6.2.0.
+ Thanks @AvitalFineRedis. #1514
+ * Added CLIENT KILL laddr filter, available in Redis 6.2.0.
+ Thanks @chayim. #1506
+ * Added CLIENT UNPAUSE, available in Redis 6.2.0.
+ Thanks @chayim. #1512
+ * Added NOMKSTREAM support for XADD, available in Redis 6.2.0.
+ Thanks @chayim. #1507
+ * Added HRANDFIELD support, available in Redis 6.2.0.
+ Thanks @AvitalFineRedis. #1513
+ * Added CLIENT INFO support, available in Redis 6.2.0.
+ Thanks @AvitalFineRedis. #1517
+ * Added GETEX support, available in Redis 6.2.0.
+ Thanks @AvitalFineRedis. #1515
+ * Added support for COPY command, available in Redis 6.2.0.
+ Thanks @malinaa96. #1492
+ * Provide a development and testing environment via docker. Thanks
+ @abrookins. #1365
+ * Added support for the LPOS command available in Redis 6.0.6. Thanks
+ @aparcar #1353/#1354
+ * Added support for the ACL LOG command available in Redis 6. Thanks
+ @2014BDuck. #1307
+ * Added support for ABSTTL option of the RESTORE command available in
+ Redis 5.0. Thanks @charettes. #1423
+* 3.5.3 (June 1, 2020)
+ * Restore try/except clauses to __del__ methods. These will be removed
+   in 4.0 when more explicit resource management is enforced. #1339
+ * Update the master_address when Sentinels promote a new master. #847
+ * Update SentinelConnectionPool to not forcefully disconnect other in-use
+ connections which can negatively affect threaded applications. #1345
+* 3.5.2 (May 14, 2020)
+ * Tune the locking in ConnectionPool.get_connection so that the lock is
+ not held while waiting for the socket to establish and validate the
+ TCP connection.
+* 3.5.1 (May 9, 2020)
+ * Fix for HSET argument validation to allow any non-None key. Thanks
+ @AleksMat, #1337, #1341
+* 3.5.0 (April 29, 2020)
+ * Removed exception trapping from __del__ methods. redis-py objects that
+ hold various resources implement __del__ cleanup methods to release
+ those resources when the object goes out of scope. This provides a
+ fallback for when these objects aren't explicitly closed by user code.
+ Prior to this change any errors encountered in closing these resources
+ would be hidden from the user. Thanks @jdufresne. #1281
+ * Expanded support for connection strings specifying a username connecting
+ to pre-v6 servers. #1274
+ * Optimized Lock's blocking_timeout and sleep. If the lock cannot be
+ acquired and the sleep value would cause the loop to sleep beyond
+ blocking_timeout, fail immediately. Thanks @clslgrnc. #1263
+ * Added support for passing Python memoryviews to Redis command args that
+ expect strings or bytes. The memoryview instance is sent directly to
+ the socket such that there are zero copies made of the underlying data
+ during command packing. Thanks @Cody-G. #1265, #1285
+ * HSET command can now accept multiple pairs, and HMSET has been marked
+   as deprecated (see the sketch after this section). Thanks to @laixintao #1271
+ * Don't manually DISCARD when encountering an ExecAbortError.
+ Thanks @nickgaya, #1300/#1301
+ * Reset the watched state of pipelines after calling exec. This saves
+ a roundtrip to the server by not having to call UNWATCH within
+ Pipeline.reset(). Thanks @nickgaya, #1299/#1302
+ * Added the KEEPTTL option for the SET command. Thanks
+ @laixintao #1304/#1280
+ * Added the MEMORY STATS command. #1268
+ * Lock.extend() now has a new option, `replace_ttl`. When False (the
+ default), Lock.extend() adds the `additional_time` to the lock's existing
+ TTL. When replace_ttl=True, the lock's existing TTL is replaced with
+ the value of `additional_time`.
+ * Add testing and support for PyPy.
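
A short sketch of the two 3.5.0 API changes flagged above, assuming a local server; key and field names are illustrative:

```python
import redis

r = redis.Redis()

# HSET now accepts multiple field/value pairs in one call via ``mapping``.
r.hset("user:1", mapping={"name": "Ada", "lang": "python"})

# memoryview values are written straight to the socket with zero copies
# of the underlying buffer during command packing.
blob = memoryview(b"\x00" * 1024)
r.set("payload", blob)
```
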
+* 3.4.1
+ * Move the username argument in the Redis and Connection classes to the
+ end of the argument list. This helps those poor souls that specify all
+ their connection options as non-keyword arguments. #1276
+ * Prior to ACL support, redis-py ignored the username component of
+ Connection URLs. With ACL support, usernames are no longer ignored and
+ are used to authenticate against an ACL rule. Some cloud vendors with
+ managed Redis instances (like Heroku) provide connection URLs with a
+ username component pre-ACL that is not intended to be used. Sending that
+ username to Redis servers < 6.0.0 results in an error. Attempt to detect
+ this condition and retry the AUTH command with only the password such
+ that authentication continues to work for these users. #1274
+ * Removed the __eq__ hooks to Redis and ConnectionPool that were added
+   in 3.4.0. This ended up being a bad idea as two separate connection
+   pools could be considered equal yet manage completely separate sets of
+   connections.
+* 3.4.0
+ * Allow empty pipelines to be executed if there are WATCHed keys.
+ This is a convenient way to test if any of the watched keys changed
+ without actually running any other commands. Thanks @brianmaissy.
+ #1233, #1234
+ * Removed support for end of life Python 3.4.
+ * Added support for all ACL commands in Redis 6. Thanks @IAmATeaPot418
+ for helping.
+ * Pipeline instances now always evaluate to True. Prior to this change,
+ pipeline instances relied on __len__ for boolean evaluation which
+ meant that pipelines with no commands on the stack would be considered
+ False. #994
+ * Client instances and Connection pools now support a 'client_name'
+ argument. If supplied, all connections created will call CLIENT SETNAME
+ as soon as the connection is opened. Thanks to @Habbie for supplying
+ the basis of this change. #802
+ * Added the 'ssl_check_hostname' argument to specify whether SSL
+ connections should require the server hostname to match the hostname
+ specified in the SSL cert. By default 'ssl_check_hostname' is False
+   for backwards compatibility (see the sketch after this section). #1196
+ * Slightly optimized command packing. Thanks @Deneby67. #1255
+ * Added support for the TYPE argument to SCAN. Thanks @netocp. #1220
+ * Better thread and fork safety in ConnectionPool and
+ BlockingConnectionPool. Added better locking to synchronize critical
+ sections rather than relying on CPython-specific implementation details
+ relating to atomic operations. Adjusted how the pools identify and
+ deal with a fork. Added a ChildDeadlockedError exception that is
+ raised by child processes in the very unlikely chance that a deadlock
+ is encountered. Thanks @gmbnomis, @mdellweg, @yht804421715. #1270,
+ #1138, #1178, #906, #1262
+ * Added __eq__ hooks to the Redis and ConnectionPool classes.
+ Thanks @brainix. #1240
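
A hypothetical sketch of the 3.4.0 connection options noted above; the hostname and client name are made up for illustration:

```python
import redis

# client_name issues CLIENT SETNAME on every connection the pool opens;
# ssl_check_hostname asks the SSL layer to verify that the server hostname
# matches the certificate (it defaults to False for backwards compatibility).
r = redis.Redis(
    host="redis.example.com",
    port=6380,
    ssl=True,
    ssl_check_hostname=True,
    client_name="worker-1",
)
```
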
+* 3.3.11
+ * Further fix for the SSLError -> TimeoutError mapping to work
+ on obscure releases of Python 2.7.
+* 3.3.10
+ * Fixed a potential error handling bug for the SSLError -> TimeoutError
+ mapping introduced in 3.3.9. Thanks @zbristow. #1224
+* 3.3.9
+ * Mapped Python 2.7 SSLError to TimeoutError where appropriate. Timeouts
+ should now consistently raise TimeoutErrors on Python 2.7 for both
+ unsecured and secured connections. Thanks @zbristow. #1222
+* 3.3.8
+ * Fixed MONITOR parsing to properly parse IPv6 client addresses, unix
+ socket connections and commands issued from Lua. Thanks @kukey. #1201
+* 3.3.7
+ * Fixed a regression introduced in 3.3.0 where socket.error exceptions
+ (or subclasses) could potentially be raised instead of
+ redis.exceptions.ConnectionError. #1202
+* 3.3.6
+ * Fixed a regression in 3.3.5 that caused PubSub.get_message() to raise
+ a socket.timeout exception when passing a timeout value. #1200
+* 3.3.5
+ * Fix an issue where socket.timeout errors could be handled by the wrong
+ exception handler in Python 2.7.
+* 3.3.4
+ * More specifically identify nonblocking read errors for both SSL and
+ non-SSL connections. 3.3.1, 3.3.2 and 3.3.3 on Python 2.7 could
+ potentially mask a ConnectionError. #1197
+* 3.3.3
+ * The SSL module in Python < 2.7.9 handles non-blocking sockets
+ differently than 2.7.9+. This patch accommodates older versions. #1197
+* 3.3.2
+ * Further fixed a regression introduced in 3.3.0 involving SSL and
+ non-blocking sockets. #1197
+* 3.3.1
+ * Fixed a regression introduced in 3.3.0 involving SSL and non-blocking
+ sockets. #1197
+* 3.3.0
+ * Resolve a race condition with the PubSubWorkerThread. #1150
+ * Cleanup socket read error messages. Thanks Vic Yu. #1159
+ * Cleanup the Connection's selector correctly. Thanks Bruce Merry. #1153
+ * Added a Monitor object to make working with MONITOR output easy.
+ Thanks Roey Prat #1033
+ * Internal cleanup: Removed the legacy Token class which was necessary
+   with older versions of Python that are no longer supported. #1066
+ * Response callbacks are now case insensitive. This allows users that
+ call Redis.execute_command() directly to pass lower-case command
+ names and still get reasonable responses. #1168
+ * Added support for hiredis-py 1.0.0 encoding error support. This should
+ make the PythonParser and the HiredisParser behave identically
+ when encountering encoding errors. Thanks Brian Candler. #1161/#1162
+ * All authentication errors now properly raise AuthenticationError.
+ AuthenticationError is now a subclass of ConnectionError, which will
+ cause the connection to be disconnected and cleaned up appropriately.
+ #923
+ * Add READONLY and READWRITE commands. Thanks @theodesp. #1114
+ * Remove selectors in favor of nonblocking sockets. Selectors had
+ issues in some environments including eventlet and gevent. This should
+ resolve those issues with no other side effects.
+ * Fixed an issue with XCLAIM and previously claimed but not removed
+ messages. Thanks @thomdask. #1192/#1191
+ * Allow for single connection client instances. These instances
+ are not thread safe but offer other benefits including a subtle
+ performance increase.
+ * Added extensive health checks that keep the connections lively.
+ Passing the "health_check_interval=N" option to the Redis client class
+ or to a ConnectionPool ensures that a round trip PING/PONG is successful
+ before any command if the underlying connection has been idle for more
+ than N seconds. ConnectionErrors and TimeoutErrors are automatically
+   retried once for health checks (see the sketch after this section).
+ * Changed the PubSubWorkerThread to use a threading.Event object rather
+ than a boolean to control the thread's life cycle. Thanks Timothy
+ Rule. #1194/#1195.
+ * Fixed a bug in Pipeline error handling that would incorrectly retry
+ ConnectionErrors.
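
A minimal sketch of the health-check and single-connection options described above, assuming a local server:

```python
import redis

# If a pooled connection sat idle for more than 30 seconds, a PING/PONG
# round trip is performed (and retried once on failure) before the next
# command is sent on it.
r = redis.Redis(health_check_interval=30)

# Single-connection clients bypass the pool: not thread safe, but they
# avoid the per-command pool checkout.
single = redis.Redis(single_connection_client=True)
single.ping()
```
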
+* 3.2.1
+ * Fix SentinelConnectionPool to work in multiprocess/forked environments.
+* 3.2.0
+ * Added support for `select.poll` to test whether data can be read
+ on a socket. This should allow for significantly more connections to
+ be used with pubsub. Fixes #486/#1115
+ * Attempt to guarantee that the ConnectionPool hands out healthy
+ connections. Healthy connections are those that have an established
+ socket connection to the Redis server, are ready to accept a command
+ and have no data available to read. Fixes #1127/#886
+ * Use the socket.IPPROTO_TCP constant instead of socket.SOL_TCP.
+ IPPROTO_TCP is available on more interpreters (Jython for instance).
+ Thanks @Junnplus. #1130
+ * Fixed a regression introduced in 3.0 that mishandles exceptions not
+ derived from the base Exception class. KeyboardInterrupt and
+   gevent.timeout are notable examples. Thanks Christian Fersch. #1128/#1129
+ * Significant improvements to handling connections with forked processes.
+ Parent and child processes no longer trample on each others' connections.
+ Thanks to Jay Rolette for the patch and highlighting this issue.
+ #504/#732/#784/#863
+ * PythonParser no longer closes the associated connection's socket. The
+ connection itself will close the socket. #1108/#1085
+* 3.1.0
+ * Connection URLs must have one of the following schemes:
+ redis://, rediss://, unix://. Thanks @jdupl123. #961/#969
+ * Fixed an issue with retry_on_timeout logic that caused some TimeoutErrors
+ to be retried. Thanks Aaron Yang. #1022/#1023
+ * Added support for SNI for SSL. Thanks @oridistor and Roey Prat. #1087
+ * Fixed ConnectionPool repr for pools with no connections. Thanks
+ Cody Scott. #1043/#995
+ * Fixed GEOHASH to return a None value when specifying a place that
+ doesn't exist on the server. Thanks @guybe7. #1126
+ * Fixed XREADGROUP to return an empty dictionary for messages that
+ have been deleted but still exist in the unacknowledged queue. Thanks
+ @xeizmendi. #1116
+ * Added an owned method to Lock objects. owned returns a boolean
+ indicating whether the current lock instance still owns the lock.
+ Thanks Dave Johansen. #1112
+ * Allow lock.acquire() to accept an optional token argument. If
+ provided, the token argument is used as the unique value used to claim
+   the lock (see the Lock sketch after this section). Thanks Dave Johansen. #1112
+ * Added a reacquire method to Lock objects. reacquire attempts to renew
+ the lock such that the timeout is extended to the same value that the
+ lock was initially acquired with. Thanks Ihor Kalnytskyi. #1014
+ * Stream names found within XREAD and XREADGROUP responses now properly
+ respect the decode_responses flag.
+ * XPENDING_RANGE now requires the user to specify the min, max and
+ count arguments. Newer versions of Redis prevent count from being
+ infinite so it's left to the user to specify these values explicitly.
+ * ZADD now returns None when xx=True and incr=True and an element
+ is specified that doesn't exist in the sorted set. This matches
+ what the server returns in this case. #1084
+ * Added client_kill_filter that accepts various filters to identify
+ and kill clients. Thanks Theofanis Despoudis. #1098
+ * Fixed a race condition that occurred when unsubscribing and
+ resubscribing to the same channel or pattern in rapid succession.
+ Thanks Marcin RaczyĆski. #764
+ * Added a LockNotOwnedError that is raised when trying to extend or
+ release a lock that is no longer owned. This is a subclass of LockError
+ so previous code should continue to work as expected. Thanks Joshua
+ Harlow. #1095
+ * Fixed a bug in GEORADIUS that forced decoding of places without
+ respecting the decode_responses option. Thanks Bo Bayles. #1082
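
A sketch of the new Lock methods from this release (`owned`, `reacquire`, and the optional `token` argument to `acquire`); the key name is illustrative:

```python
import redis

r = redis.Redis()
lock = r.lock("job:refresh", timeout=10)

if lock.acquire(blocking=False):  # an explicit token= may also be supplied
    try:
        assert lock.owned()       # True while this instance holds the lock
        lock.reacquire()          # extend the TTL back to the original 10s
    finally:
        lock.release()
```
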
+* 3.0.1
+ * Fixed regression with UnixDomainSocketConnection caused by 3.0.0.
+ Thanks Jyrki Muukkonen
+ * Fixed an issue with the new asynchronous flag on flushdb and flushall.
+ Thanks rogeryen
+ * Updated Lock.locked() method to indicate whether *any* process has
+ acquired the lock, not just the current one. This is in line with
+ the behavior of threading.Lock. Thanks Alan Justino da Silva
+* 3.0.0
+ BACKWARDS INCOMPATIBLE CHANGES
+ * When using a Lock as a context manager and the lock fails to be acquired,
+   a LockError is now raised. This prevents the code block inside the
+   context manager from running if the lock could not be acquired (see the
+   sketch after this list).
+ * Renamed LuaLock to Lock.
+ * Removed the pipeline based Lock implementation in favor of the LuaLock
+ implementation.
+ * Only bytes, strings and numbers (ints, longs and floats) are acceptable
+ for keys and values. Previously redis-py attempted to cast other types
+   to str() and store the result. This caused much confusion and frustration
+ when passing boolean values (cast to 'True' and 'False') or None values
+ (cast to 'None'). It is now the user's responsibility to cast all
+ key names and values to bytes, strings or numbers before passing the
+ value to redis-py.
+ * The StrictRedis class has been renamed to Redis. StrictRedis will
+ continue to exist as an alias of Redis for the foreseeable future.
+ * The legacy Redis client class has been removed. It caused much confusion
+ to users.
+ * ZINCRBY arguments 'value' and 'amount' have swapped order to match
+   the Redis server. The new argument order is: keyname, amount, value.
+ * MGET no longer raises an error if zero keys are passed in. Instead an
+ empty list is returned.
+ * MSET and MSETNX now require all keys/values to be specified in a single
+   dictionary argument named mapping. This was changed to allow for future
+   options to these commands.
+ * ZADD now requires all element names/scores to be specified in a single
+ dictionary argument named mapping. This was required to allow the NX,
+ XX, CH and INCR options to be specified.
+ * ssl_cert_reqs now defaults to 'required'. This should make connecting
+   to a remote Redis server over SSL more secure. Thanks u2mejc
+ * Removed support for EOL Python 2.6 and 3.3. Thanks jdufresne
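
To make the breaking changes above concrete, a small sketch assuming a local server:

```python
import redis
from redis.exceptions import LockError

r = redis.Redis()

# MSET/MSETNX and ZADD now take a single mapping argument.
r.mset({"k1": "v1", "k2": "v2"})
r.zadd("scores", {"alice": 1.0, "bob": 2.0}, nx=True)

# A Lock used as a context manager raises LockError when it cannot be
# acquired, so the guarded block never runs without the lock.
try:
    with r.lock("resource", blocking_timeout=3):
        pass  # critical section
except LockError:
    pass  # could not acquire within 3 seconds
```
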
+ OTHER CHANGES
+ * Added missing DECRBY command. Thanks derek-dchu
+ * CLUSTER INFO and CLUSTER NODES responses are now properly decoded to
+ strings.
+ * Added a 'locked()' method to Lock objects. This method returns True
+ if the lock has been acquired and owned by the current process,
+ otherwise False.
+ * EXISTS now supports multiple keys. Its return value is now the number
+ of keys in the list that exist.
+ * Ensure all commands can accept key names as bytes. This fixes issues
+ with BLPOP, BRPOP and SORT.
+ * All errors resulting from bad user input are raised as DataError
+ exceptions. DataError is a subclass of RedisError so this should be
+ transparent to anyone previously catching these.
+ * Added support for NX, XX, CH and INCR options to ZADD
+ * Added support for the MIGRATE command
+ * Added support for the MEMORY USAGE and MEMORY PURGE commands. Thanks
+ Itamar Haber
+ * Added support for the 'asynchronous' argument to FLUSHDB and FLUSHALL
+ commands. Thanks Itamar Haber
+ * Added support for the BITFIELD command. Thanks Charles Leifer and
+ Itamar Haber
+ * Improved performance on pipeline requests with large chunks of data.
+ Thanks tzickel
+ * Fixed test suite to not fail if another client is connected to the
+ server the tests are running against.
+ * Added support for SWAPDB. Thanks Itamar Haber
+ * Added support for all STREAM commands. Thanks Roey Prat and Itamar Haber
+ * SHUTDOWN now accepts the 'save' and 'nosave' arguments. Thanks
+ dwilliams-kenzan
+ * Added support for ZPOPMAX, ZPOPMIN, BZPOPMAX, BZPOPMIN. Thanks
+ Itamar Haber
+ * Added support for the 'type' argument in CLIENT LIST. Thanks Roey Prat
+ * Added support for CLIENT PAUSE. Thanks Roey Prat
+ * Added support for CLIENT ID and CLIENT UNBLOCK. Thanks Itamar Haber
+ * GEODIST now returns a None value when referencing a place that does
+ not exist. Thanks qingping209
+ * Added a ping() method to pubsub objects. Thanks krishan-carbon
+ * Fixed a bug with keys in the INFO dict that contained ':' symbols.
+ Thanks mzalimeni
+ * Fixed the select system call retry compatibility with Python 2.x.
+ Thanks lddubeau
+ * max_connections is now a valid querystring argument for creating
+ connection pools from URLs. Thanks mmaslowskicc
+ * Added the UNLINK command. Thanks yozel
+ * Added socket_type option to Connection for configurability.
+ Thanks garlicnation
+ * Lock.do_acquire now atomically acquires the lock and sets the
+   expire value via set(nx=True, px=timeout). Thanks 23doors
+ * Added 'count' argument to SPOP. Thanks AlirezaSadeghi
+ * Fixed an issue parsing client_list responses that contained an '='.
+ Thanks swilly22
+* 2.10.6
+ * Various performance improvements. Thanks cjsimpson
+ * Fixed a bug with SRANDMEMBER where the behavior for `number=0` did
+ not match the spec. Thanks Alex Wang
+ * Added HSTRLEN command. Thanks Alexander Putilin
+ * Added the TOUCH command. Thanks Anis Jonischkeit
+ * Remove unnecessary calls to the server when registering Lua scripts.
+ Thanks Ben Greenberg
+ * SET's EX and PX arguments now allow values of zero. Thanks huangqiyin
+ * Added PUBSUB {CHANNELS, NUMPAT, NUMSUB} commands. Thanks Angus Pearson
+ * PubSub connections that encounter `InterruptedError`s now
+ retry automatically. Thanks Carlton Gibson and Seth M. Larson
+ * LPUSH and RPUSH commands run on PyPy now correctly return the number
+   of items in the list. Thanks Jeong YunWon
+ * Added support to automatically retry socket EINTR errors. Thanks
+ Thomas Steinacher
+ * PubSubWorker threads started with `run_in_thread` are now daemonized
+ so the thread shuts down when the running process goes away. Thanks
+ Keith Ainsworth
+ * Added support for GEO commands. Thanks Pau Freixes, Alex DeBrie and
+ Abraham Toriz
+ * Made client construction from URLs smarter. Thanks Tim Savage
+ * Added support for CLUSTER * commands. Thanks Andy Huang
+ * The RESTORE command now accepts an optional `replace` boolean.
+ Thanks Yoshinari Takaoka
+ * Attempt to connect to a new Sentinel if a TimeoutError occurs. Thanks
+ Bo Lopker
+ * Fixed a bug in the client's `__getitem__` where a KeyError would be
+ raised if the value returned by the server is an empty string.
+ Thanks Javier Candeira.
+ * Socket timeouts when connecting to a server are now properly raised
+ as TimeoutErrors.
* 2.10.5
* Allow URL encoded parameters in Redis URLs. Characters like a "/" can
now be URL encoded and redis-py will correctly decode them. Thanks
@@ -7,11 +671,11 @@
cleans up the connection, unsubscribes from any channels and patterns
previously subscribed to and consumes any waiting messages on the socket.
* Added the ability to sleep for a brief period in the event of a
- WatchError occuring. Thanks Joshua Harlow.
+ WatchError occurring. Thanks Joshua Harlow.
* Fixed a bug with pipeline error reporting when dealing with characters
in error messages that could not be encoded to the connection's
character set. Thanks Hendrik Muhs.
- * Fixed a bug in Sentinel connections that would inadvertantly connect
+ * Fixed a bug in Sentinel connections that would inadvertently connect
to the master when the connection pool resets. Thanks
https://github.com/df3n5
* Better timeout support in Pubsub get_message. Thanks Andy Isaacson.
@@ -32,7 +696,7 @@
* 2.10.2
* Added support for Hiredis's new bytearray support. Thanks
https://github.com/tzickel
- * POSSIBLE BACKWARDS INCOMPATBLE CHANGE: Fixed a possible race condition
+ * POSSIBLE BACKWARDS INCOMPATIBLE CHANGE: Fixed a possible race condition
when multiple threads share the same Lock instance with a timeout. Lock
tokens are now stored in thread local storage by default. If you have
code that acquires a lock in one thread and passes that lock instance to
@@ -46,7 +710,7 @@
either to StrictRedis.__init__ or from_url will still work but will
also emit a DeprecationWarning. Instead use the "encoding" and
"encoding_errors" options.
- * Fixed a compatability bug with Python 3 when the server closes a
+ * Fixed a compatibility bug with Python 3 when the server closes a
connection.
* Added BITPOS command. Thanks https://github.com/jettify.
* Fixed a bug when attempting to send large values to Redis in a Pipeline.
@@ -55,12 +719,12 @@
master and receives a READONLY error will disconnect and reconnect to
the master.
* 2.10.0
- * Discontinuted support for Python 2.5. Upgrade. You'll be happier.
+ * Discontinued support for Python 2.5. Upgrade. You'll be happier.
* The HiRedis parser will now properly raise ConnectionErrors.
* Completely refactored PubSub support. Fixes all known PubSub bugs and
adds a bunch of new features. Docs can be found in the README under the
new "Publish / Subscribe" section.
- * Added the new HyperLogLog commanads (PFADD, PFCOUNT, PFMERGE). Thanks
+ * Added the new HyperLogLog commands (PFADD, PFCOUNT, PFMERGE). Thanks
Pepijn de Vos and Vincent Ohprecio.
* Updated TTL and PTTL commands with Redis 2.8+ semantics. Thanks Markus
Kaiserswerth.
@@ -87,12 +751,12 @@
* Fixed Sentinel state parsing on Python 3.
* Added support for SENTINEL MONITOR, SENTINEL REMOVE, and SENTINEL SET
commands. Thanks Greg Murphy.
- * INFO ouput that doesn't follow the "key:value" format will now be
+ * INFO output that doesn't follow the "key:value" format will now be
appended to a key named "__raw__" in the INFO dictionary. Thanks Pedro
Larroy.
* The "vagrant" directory contains a complete vagrant environment for
redis-py developers. The environment runs a Redis master, a Redis slave,
- and 3 Sentinels. Future iterations of the test sutie will incorporate
+ and 3 Sentinels. Future iterations of the test suite will incorporate
more integration style tests, ensuring things like failover happen
correctly.
* It's now possible to create connection pool instances from a URL.
@@ -105,7 +769,7 @@
* Fixed a bug where some encodings (like utf-16) were unusable on Python 3
as command names and literals would get encoded.
* Added an SSLConnection class that allows for secure connections through
- stunnel or other means. Construct and SSL connection with the sll=True
+ stunnel or other means. Construct an SSL connection with the ssl=True
option on client classes, using the rediss:// scheme from an URL, or
by passing the SSLConnection class to a connection pool's
connection_class argument. Thanks https://github.com/oranagra.
@@ -159,7 +823,7 @@
for the report.
* Connections now call socket.shutdown() prior to socket.close() to
ensure communication ends immediately per the note at
- http://docs.python.org/2/library/socket.html#socket.socket.close
+ https://docs.python.org/2/library/socket.html#socket.socket.close
Thanks to David Martin for pointing this out.
* Lock checks are now based on floats rather than ints. Thanks
Vitja Makarov.
@@ -193,11 +857,11 @@
* Prevent DISCARD from being called if MULTI wasn't also called. Thanks
Pete Aykroyd.
* SREM now returns an integer indicating the number of items removed from
- the set. Thanks http://github.com/ronniekk.
+ the set. Thanks https://github.com/ronniekk.
* Fixed a bug with BGSAVE and BGREWRITEAOF response callbacks with Python3.
Thanks Nathan Wan.
* Added CLIENT GETNAME and CLIENT SETNAME commands.
- Thanks http://github.com/bitterb.
+ Thanks https://github.com/bitterb.
* It's now possible to use len() on a pipeline instance to determine the
number of commands that will be executed. Thanks Jon Parise.
* Fixed a bug in INFO's parse routine with floating point numbers. Thanks
@@ -299,15 +963,15 @@
      and LREM. Docs have been updated to suggest the use of StrictRedis.
* SETEX in StrictRedis is now compliant with official Redis SETEX command.
the name, value, time implementation moved to "Redis" for backwards
- compatability.
+ compatibility.
* 2.4.9
- * Removed socket retry logic in Connection. This is the responsbility of
+ * Removed socket retry logic in Connection. This is the responsibility of
the caller to determine if the command is safe and can be retried. Thanks
David Wolver.
* Added some extra guards around various types of exceptions being raised
when sending or parsing data. Thanks David Wolver and Denis Bilenko.
* 2.4.8
- * Imported with_statement from __future__ for Python 2.5 compatability.
+ * Imported with_statement from __future__ for Python 2.5 compatibility.
* 2.4.7
* Fixed a bug where some connections were not getting released back to the
connection pool after pipeline execution.
@@ -363,7 +1027,7 @@
by passing it to the `connection_class` argument of the ConnectionPool.
* Connections no longer derive from threading.local. See threading.local
note below.
- * ConnectionPool has been comletely refactored. The ConnectionPool now
+ * ConnectionPool has been completely refactored. The ConnectionPool now
maintains a list of connections. The redis-py client only hangs on to
a ConnectionPool instance, calling get_connection() anytime it needs to
send a command. When get_connection() is called, the command name and
@@ -373,8 +1037,8 @@
disconnect() to force all connections in the pool to disconnect from
the Redis server.
* redis-py no longer support the SELECT command. You can still connect to
- a specific database by specifing it when instantiating a client instance
- or by creating a connection pool. If you need to talk to multiplate
+ a specific database by specifying it when instantiating a client instance
+ or by creating a connection pool. If you need to talk to multiple
databases within your application, you should use a separate client
instance for each database you want to talk to.
* Completely refactored Publish/Subscribe support. The subscribe and listen
@@ -400,7 +1064,7 @@
* Newer Redis versions return a LOADING message for some commands while
the database is loading from disk during server start. This could cause
problems with SELECT. We now force a socket disconnection prior to
- raising a ResponseError so subsuquent connections have to reconnect and
+ raising a ResponseError so subsequent connections have to reconnect and
re-select the appropriate database. Thanks to Benjamin Anderson for
finding this and fixing.
* 2.2.4
@@ -415,7 +1079,7 @@
to be used as timeout values. No changes to existing code required.
* WATCH now supports multiple keys. Thanks Rich Schumacher.
* Broke out some code that was Python 2.4 incompatible. redis-py should
- now be useable on 2.4, but this hasn't actually been tested. Thanks
+ now be usable on 2.4, but this hasn't actually been tested. Thanks
Dan Colish for the patch.
* Optimized some code using izip and islice. Should have a pretty good
speed up on larger data sets. Thanks Dan Colish.
@@ -434,7 +1098,7 @@
* The INFO command should be parsed correctly on 2.2.x server versions
and is backwards compatible with older versions. Thanks Brett Hoerner.
* 2.2.2
- * Fixed a bug in ZREVRANK where retriving the rank of a value not in
+ * Fixed a bug in ZREVRANK where retrieving the rank of a value not in
the zset would raise an error.
* Fixed a bug in Connection.send where the errno import was getting
overwritten by a local variable.
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000000..e31ec3491e
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,184 @@
+# Contributing
+
+## Introduction
+
+First off, thank you for considering contributing to redis-py. We value
+community contributions!
+
+## Contributions We Need
+
+You may already know what you want to contribute -- a fix for a bug you
+encountered, or a new feature your team wants to use.
+
+If you don't know what to contribute, keep an open mind! Improving
+documentation, bug triaging, and writing tutorials are all examples of
+helpful contributions that mean less work for you.
+
+## Your First Contribution
+
+Unsure where to begin contributing? You can start by looking through
+[help-wanted
+issues](https://github.com/andymccurdy/redis-py/issues?q=is%3Aopen+is%3Aissue+label%3ahelp-wanted).
+
+Never contributed to open source before? Here are a couple of friendly
+tutorials:
+
+-
+-
+
+## Getting Started
+
+Here's how to get started with your code contribution:
+
+1. Create your own fork of redis-py
+2. Do the changes in your fork
+3. *Create a virtualenv and install the development dependencies from
+   the dev_requirements.txt file:*
+
+ a. python -m venv .venv
+ b. source .venv/bin/activate
+ c. pip install -r dev_requirements.txt
+
+4. If you need a development environment, run `invoke devenv`
+5. While developing, make sure the tests pass by running `invoke tests`
+6. If you like the change and think the project could use it, send a
+ pull request
+
+To see what else is part of the automation, run `invoke -l`
+
+## The Development Environment
+
+Running `invoke devenv` installs the development dependencies specified
+in dev_requirements.txt. It starts all of the docker containers used by
+this project, and leaves them running. These can be easily cleaned up
+with `invoke clean`. NOTE: it is assumed that the user running these
+tests can execute docker and its various commands. The environment
+consists of:
+
+- A master Redis node
+- A Redis replica node
+- Three sentinel Redis nodes
+- A redis cluster
+- An stunnel container, fronting the master Redis node
+- A Redis node running unstable, the latest redis
+
+The replica node is a replica of the master node, using the
+[leader-follower replication](https://redis.io/topics/replication)
+feature.
+
+The sentinels monitor the master node in a [sentinel high-availability
+configuration](https://redis.io/topics/sentinel).
+
+## Testing
+
+Call `invoke tests` to run all tests, or `invoke all-tests` to run the
+linters as well. With the 'tests' and 'all-tests' targets, all Redis and
+RedisCluster tests will be run.
+
+It is possible to run only Redis client tests (with cluster mode disabled) by
+using `invoke standalone-tests`; similarly, RedisCluster tests can be run by using
+`invoke cluster-tests`.
+
+Each run of tox starts and stops the various docker containers required.
+Sometimes things get stuck; an `invoke clean` can help.
+
+Continuous Integration uses these same wrappers to run all of these
+tests against multiple versions of python. Feel free to test your
+changes against all the python versions supported, as declared in the
+tox.ini file (e.g. `tox -e py39`). If you have the various python
+versions on your desktop, you can run *tox* by itself to test all
+supported versions.
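+
+For example, a typical local development loop using only the targets
+mentioned above might look like this:
+
+``` bash
+# start the docker-based development environment
+invoke devenv
+
+# run the standalone (non-cluster) client tests
+invoke standalone-tests
+
+# stop and clean up the containers when done
+invoke clean
+```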
+
+### Docker Tips
+
+Following are a few tips that can help you work with the Docker-based
+development environment.
+
+To get a bash shell inside of a container:
+
+`$ docker run -it <service> /bin/bash`
+
+**Note**: The term "service" refers to the "services" defined in the
+`tox.ini` file at the top of the repo: "master", "replicaof",
+"sentinel_1", "sentinel_2", "sentinel_3".
+
+Containers run a minimal Debian image that probably lacks tools you want
+to use. To install packages, first get a bash session (see previous tip)
+and then run:
+
+`$ apt update && apt install <package>`
+
+You can see the logging output of a container like this:
+
+`$ docker logs -f <service>`
+
+The command `make test` runs all tests in all tested Python
+environments. To run the tests in a single environment, like Python 3.9,
+use a command like this:
+
+`$ docker-compose run test tox -e py39 -- --redis-url=redis://master:6379/9`
+
+Here, the flag `-e py39` runs tests against the Python 3.9 tox
+environment. Note that whenever you run tests like this, instead of
+using `make test`, you need to pass
+`-- --redis-url=redis://master:6379/9`. This points the tests at the
+"master" container.
+
+Our test suite uses `pytest`. You can run a specific test suite against
+a specific Python version like this:
+
+`$ docker-compose run test tox -e py37 -- --redis-url=redis://master:6379/9 tests/test_commands.py`
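+
+You can narrow the run down further with pytest's `-k` keyword filter
+(the test name below is hypothetical):
+
+`$ docker-compose run test tox -e py37 -- --redis-url=redis://master:6379/9 tests/test_commands.py -k test_get`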
+
+### Troubleshooting
+
+If you get any errors when running `make dev` or `make test`, make sure
+that you are using supported versions of Docker.
+
+Please use at least the following version of Docker:
+
+- Docker 19.03.12
+
+## How to Report a Bug
+
+### Security Vulnerabilities
+
+**NOTE**: If you find a security vulnerability, do NOT open an issue.
+Email [Redis Open Source](mailto:oss@redis.com) instead.
+
+In order to determine whether you are dealing with a security issue, ask
+yourself these two questions:
+
+- Can I access something that's not mine, or something I shouldn't
+ have access to?
+- Can I disable something for other people?
+
+If the answer to either of those two questions is *yes*, then you're
+probably dealing with a security issue. Note that even if you answer
+*no* to both questions, you may still be dealing with a security
+issue, so if you're unsure, just email [us](mailto:oss@redis.com).
+
+### Everything Else
+
+When filing an issue, make sure to answer these five questions:
+
+1. What version of redis-py are you using?
+2. What version of redis are you using?
+3. What did you do?
+4. What did you expect to see?
+5. What did you see instead?
+
+## How to Suggest a Feature or Enhancement
+
+If you'd like to contribute a new feature, make sure you check our
+issue list to see if someone has already proposed it. Work may already
+be under way on the feature you want -- or we may have rejected a
+feature like it already.
+
+If you don't see anything, open a new issue that describes the feature
+you would like and how it should work.
+
+## Code Review Process
+
+The core team looks at Pull Requests on a regular basis. We will give
+feedback as soon as possible. After feedback, we expect a response
+within two weeks. After that time, we may close your PR if it isn't
+showing any activity.
diff --git a/LICENSE b/LICENSE
index 29a3fe3845..00aee10d6a 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,22 +1,21 @@
-Copyright (c) 2012 Andy McCurdy
+MIT License
- Permission is hereby granted, free of charge, to any person
- obtaining a copy of this software and associated documentation
- files (the "Software"), to deal in the Software without
- restriction, including without limitation the rights to use,
- copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the
- Software is furnished to do so, subject to the following
- conditions:
+Copyright (c) 2022, Redis, inc.
- The above copyright notice and this permission notice shall be
- included in all copies or substantial portions of the Software.
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- OTHER DEALINGS IN THE SOFTWARE.
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/MANIFEST.in b/MANIFEST.in
index 7aaee12a1d..97fa305889 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,7 +1,6 @@
-include CHANGES
include INSTALL
include LICENSE
-include README.rst
+include README.md
exclude __pycache__
recursive-include tests *
recursive-exclude tests *.pyc
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000..e97119a888
--- /dev/null
+++ b/README.md
@@ -0,0 +1,146 @@
+# redis-py
+
+The Python interface to the Redis key-value store.
+
+[](https://github.com/redis/redis-py/actions?query=workflow%3ACI+branch%3Amaster)
+[](https://redis-py.readthedocs.io/en/stable/)
+[](./LICENSE)
+[](https://pypi.org/project/redis/)
+[](https://github.com/redis/redis-py/releases)
+[](https://codecov.io/gh/redis/redis-py)
+
+[Installation](#installation) | [Usage](#usage) | [Advanced Topics](#advanced-topics) | [Contributing](https://github.com/redis/redis-py/blob/master/CONTRIBUTING.md)
+
+---------------------------------------------
+
+## Installation
+
+Start a Redis server via Docker:
+
+``` bash
+docker run -p 6379:6379 -it redis/redis-stack:latest
+```
+
+To install redis-py, simply:
+
+``` bash
+$ pip install redis
+```
+
+For faster performance, install redis-py with hiredis support; this provides a compiled response parser and, in most cases, requires zero code changes.
+By default, if hiredis >= 1.0 is available, redis-py will attempt to use it for response parsing.
+
+``` bash
+$ pip install "redis[hiredis]"
+```
+
+Looking for a high-level library to handle object mapping? See [redis-om-python](https://github.com/redis/redis-om-python)!
+
+## Supported Redis Versions
+
+The most recent version of this library supports redis versions [5.0](https://github.com/redis/redis/blob/5.0/00-RELEASENOTES), [6.0](https://github.com/redis/redis/blob/6.0/00-RELEASENOTES), [6.2](https://github.com/redis/redis/blob/6.2/00-RELEASENOTES), and [7.0](https://github.com/redis/redis/blob/7.0/00-RELEASENOTES).
+
+The table below highlights the version compatibility of the most recent library versions and redis versions.
+
+| Library version | Supported redis versions |
+|-----------------|-------------------|
+| 3.5.3 | <= 6.2 Family of releases |
+| >= 4.1.0 | Version 5.0 to current |
+
+
+## Usage
+
+### Basic Example
+
+``` python
+>>> import redis
+>>> r = redis.Redis(host='localhost', port=6379, db=0)
+>>> r.set('foo', 'bar')
+True
+>>> r.get('foo')
+b'bar'
+```
+
+The above code connects to localhost on port 6379, sets a value in Redis, and retrieves it. All responses are returned as bytes in Python; to receive decoded strings, set *decode_responses=True*. For this, and more connection options, see [these examples](https://redis.readthedocs.io/en/stable/examples.html).
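+
+For example, constructing the client with *decode_responses=True* (a small variation on the snippet above) returns `str` values instead of `bytes`:
+
+``` python
+>>> r = redis.Redis(host='localhost', port=6379, db=0, decode_responses=True)
+>>> r.set('foo', 'bar')
+True
+>>> r.get('foo')
+'bar'
+```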
+
+### Connection Pools
+
+By default, redis-py uses a connection pool to manage connections. Each instance of a Redis class receives its own connection pool. You can, however, define your own [redis.ConnectionPool](https://redis.readthedocs.io/en/stable/connections.html#connection-pools).
+
+``` python
+>>> pool = redis.ConnectionPool(host='localhost', port=6379, db=0)
+>>> r = redis.Redis(connection_pool=pool)
+```
+
+Alternatively, you might want to look at [Async connections](https://redis.readthedocs.io/en/stable/examples/asyncio_examples.html), or [Cluster connections](https://redis.readthedocs.io/en/stable/connections.html#cluster-client), or even [Async Cluster connections](https://redis.readthedocs.io/en/stable/connections.html#async-cluster-client).
+
+### Redis Commands
+
+There is built-in support for all of the [out-of-the-box Redis commands](https://redis.io/commands). They are exposed using the raw Redis command names (`HSET`, `HGETALL`, etc.) except where a word (e.g. `del`) is reserved by the language. The complete set of commands can be found [here](https://github.com/redis/redis-py/tree/master/redis/commands), or in [the documentation](https://redis.readthedocs.io/en/stable/commands.html).
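+
+As a quick illustration (the key and field names here are arbitrary), command methods mirror the raw names, and the reserved word `del` becomes `delete`:
+
+``` python
+>>> r.hset('user-hash', 'name', 'alice')
+1
+>>> r.hgetall('user-hash')
+{b'name': b'alice'}
+>>> r.delete('user-hash')
+1
+```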
+
+## Advanced Topics
+
+The [official Redis command documentation](https://redis.io/commands)
+does a great job of explaining each command in detail. redis-py attempts
+to adhere to the official command syntax. There are a few exceptions:
+
+- **MULTI/EXEC**: These are implemented as part of the Pipeline class.
+ The pipeline is wrapped with the MULTI and EXEC statements by
+ default when it is executed, which can be disabled by specifying
+ transaction=False. See more about Pipelines below.
+
+- **SUBSCRIBE/LISTEN**: Similar to pipelines, PubSub is implemented as
+ a separate class as it places the underlying connection in a state
+  where it can't execute non-pubsub commands. Calling the pubsub
+ method from the Redis client will return a PubSub instance where you
+ can subscribe to channels and listen for messages. You can only call
+ PUBLISH from the Redis client (see [this comment on issue
+ #151](https://github.com/redis/redis-py/issues/151#issuecomment-1545015)
+ for details).
+
+For more details, please see the documentation on [advanced topics page](https://redis.readthedocs.io/en/stable/advanced_features.html).
+
+### Pipelines
+
+The following is a basic example of a [Redis pipeline](https://redis.io/docs/manual/pipelining/), a method to optimize round-trip calls by batching Redis commands and receiving their results as a list.
+
+
+``` python
+>>> pipe = r.pipeline()
+>>> pipe.set('foo', 5)
+>>> pipe.set('bar', 18.5)
+>>> pipe.set('blee', "hello world!")
+>>> pipe.execute()
+[True, True, True]
+```
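+
+As noted above, the pipeline is wrapped with MULTI and EXEC by default; to keep the batching but drop the transactional wrapper, pass *transaction=False*:
+
+``` python
+>>> pipe = r.pipeline(transaction=False)
+```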
+
+### PubSub
+
+The following example shows how to utilize [Redis Pub/Sub](https://redis.io/docs/manual/pubsub/) to subscribe to specific channels.
+
+``` python
+>>> r = redis.Redis(...)
+>>> p = r.pubsub()
+>>> p.subscribe('my-first-channel', 'my-second-channel', ...)
+>>> p.get_message()
+{'pattern': None, 'type': 'subscribe', 'channel': b'my-second-channel', 'data': 1}
+```
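+
+Publishing is done from the Redis client rather than the `PubSub` object. A sketch of the round trip, once the remaining subscribe confirmations have been read (with the default *decode_responses=False*, channel and data come back as bytes):
+
+``` python
+>>> r.publish('my-first-channel', 'some data')
+1
+>>> p.get_message()
+{'pattern': None, 'type': 'message', 'channel': b'my-first-channel', 'data': b'some data'}
+```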
+
+
+--------------------------
+
+### Author
+
+redis-py is developed and maintained by [Redis Inc](https://redis.com). It can be found [here](https://github.com/redis/redis-py), or downloaded from [pypi](https://pypi.org/project/redis/).
+
+Special thanks to:
+
+- Andy McCurdy (sedrik@gmail.com), the original author of redis-py.
+- Ludovico Magnocavallo, author of the original Python Redis client,
+ from which some of the socket code is still used.
+- Alexander Solovyov for ideas on the generic response callback
+ system.
+- Paul Hubbard for initial packaging support.
+
+[](https://www.redis.com)
diff --git a/README.rst b/README.rst
deleted file mode 100644
index 6caf203d5f..0000000000
--- a/README.rst
+++ /dev/null
@@ -1,673 +0,0 @@
-redis-py
-========
-
-The Python interface to the Redis key-value store.
-
-.. image:: https://secure.travis-ci.org/andymccurdy/redis-py.png?branch=master
- :target: http://travis-ci.org/andymccurdy/redis-py
-
-Installation
-------------
-
-redis-py requires a running Redis server. See `Redis's quickstart
-`_ for installation instructions.
-
-To install redis-py, simply:
-
-.. code-block:: bash
-
- $ sudo pip install redis
-
-or alternatively (you really should be using pip though):
-
-.. code-block:: bash
-
- $ sudo easy_install redis
-
-or from source:
-
-.. code-block:: bash
-
- $ sudo python setup.py install
-
-
-Getting Started
----------------
-
-.. code-block:: pycon
-
- >>> import redis
- >>> r = redis.StrictRedis(host='localhost', port=6379, db=0)
- >>> r.set('foo', 'bar')
- True
- >>> r.get('foo')
- 'bar'
-
-API Reference
--------------
-
-The `official Redis command documentation `_ does a
-great job of explaining each command in detail. redis-py exposes two client
-classes that implement these commands. The StrictRedis class attempts to adhere
-to the official command syntax. There are a few exceptions:
-
-* **SELECT**: Not implemented. See the explanation in the Thread Safety section
- below.
-* **DEL**: 'del' is a reserved keyword in the Python syntax. Therefore redis-py
- uses 'delete' instead.
-* **CONFIG GET|SET**: These are implemented separately as config_get or config_set.
-* **MULTI/EXEC**: These are implemented as part of the Pipeline class. The
- pipeline is wrapped with the MULTI and EXEC statements by default when it
- is executed, which can be disabled by specifying transaction=False.
- See more about Pipelines below.
-* **SUBSCRIBE/LISTEN**: Similar to pipelines, PubSub is implemented as a separate
- class as it places the underlying connection in a state where it can't
- execute non-pubsub commands. Calling the pubsub method from the Redis client
- will return a PubSub instance where you can subscribe to channels and listen
- for messages. You can only call PUBLISH from the Redis client (see
- `this comment on issue #151
- `_
- for details).
-* **SCAN/SSCAN/HSCAN/ZSCAN**: The \*SCAN commands are implemented as they
- exist in the Redis documentation. In addition, each command has an equivilant
- iterator method. These are purely for convenience so the user doesn't have
- to keep track of the cursor while iterating. Use the
- scan_iter/sscan_iter/hscan_iter/zscan_iter methods for this behavior.
-
-In addition to the changes above, the Redis class, a subclass of StrictRedis,
-overrides several other commands to provide backwards compatibility with older
-versions of redis-py:
-
-* **LREM**: Order of 'num' and 'value' arguments reversed such that 'num' can
- provide a default value of zero.
-* **ZADD**: Redis specifies the 'score' argument before 'value'. These were swapped
- accidentally when being implemented and not discovered until after people
- were already using it. The Redis class expects \*args in the form of:
- `name1, score1, name2, score2, ...`
-* **SETEX**: Order of 'time' and 'value' arguments reversed.
-
-
-More Detail
------------
-
-Connection Pools
-^^^^^^^^^^^^^^^^
-
-Behind the scenes, redis-py uses a connection pool to manage connections to
-a Redis server. By default, each Redis instance you create will in turn create
-its own connection pool. You can override this behavior and use an existing
-connection pool by passing an already created connection pool instance to the
-connection_pool argument of the Redis class. You may choose to do this in order
-to implement client side sharding or have finer grain control of how
-connections are managed.
-
-.. code-block:: pycon
-
- >>> pool = redis.ConnectionPool(host='localhost', port=6379, db=0)
- >>> r = redis.Redis(connection_pool=pool)
-
-Connections
-^^^^^^^^^^^
-
-ConnectionPools manage a set of Connection instances. redis-py ships with two
-types of Connections. The default, Connection, is a normal TCP socket based
-connection. The UnixDomainSocketConnection allows for clients running on the
-same device as the server to connect via a unix domain socket. To use a
-UnixDomainSocketConnection connection, simply pass the unix_socket_path
-argument, which is a string to the unix domain socket file. Additionally, make
-sure the unixsocket parameter is defined in your redis.conf file. It's
-commented out by default.
-
-.. code-block:: pycon
-
- >>> r = redis.Redis(unix_socket_path='/tmp/redis.sock')
-
-You can create your own Connection subclasses as well. This may be useful if
-you want to control the socket behavior within an async framework. To
-instantiate a client class using your own connection, you need to create
-a connection pool, passing your class to the connection_class argument.
-Other keyword parameters you pass to the pool will be passed to the class
-specified during initialization.
-
-.. code-block:: pycon
-
- >>> pool = redis.ConnectionPool(connection_class=YourConnectionClass,
- your_arg='...', ...)
-
-Parsers
-^^^^^^^
-
-Parser classes provide a way to control how responses from the Redis server
-are parsed. redis-py ships with two parser classes, the PythonParser and the
-HiredisParser. By default, redis-py will attempt to use the HiredisParser if
-you have the hiredis module installed and will fallback to the PythonParser
-otherwise.
-
-Hiredis is a C library maintained by the core Redis team. Pieter Noordhuis was
-kind enough to create Python bindings. Using Hiredis can provide up to a
-10x speed improvement in parsing responses from the Redis server. The
-performance increase is most noticeable when retrieving many pieces of data,
-such as from LRANGE or SMEMBERS operations.
-
-Hiredis is available on PyPI, and can be installed via pip or easy_install
-just like redis-py.
-
-.. code-block:: bash
-
- $ pip install hiredis
-
-or
-
-.. code-block:: bash
-
- $ easy_install hiredis
-
-Response Callbacks
-^^^^^^^^^^^^^^^^^^
-
-The client class uses a set of callbacks to cast Redis responses to the
-appropriate Python type. There are a number of these callbacks defined on
-the Redis client class in a dictionary called RESPONSE_CALLBACKS.
-
-Custom callbacks can be added on a per-instance basis using the
-set_response_callback method. This method accepts two arguments: a command
-name and the callback. Callbacks added in this manner are only valid on the
-instance the callback is added to. If you want to define or override a callback
-globally, you should make a subclass of the Redis client and add your callback
-to its REDIS_CALLBACKS class dictionary.
-
-Response callbacks take at least one parameter: the response from the Redis
-server. Keyword arguments may also be accepted in order to further control
-how to interpret the response. These keyword arguments are specified during the
-command's call to execute_command. The ZRANGE implementation demonstrates the
-use of response callback keyword arguments with its "withscores" argument.
-
-Thread Safety
-^^^^^^^^^^^^^
-
-Redis client instances can safely be shared between threads. Internally,
-connection instances are only retrieved from the connection pool during
-command execution, and returned to the pool directly after. Command execution
-never modifies state on the client instance.
-
-However, there is one caveat: the Redis SELECT command. The SELECT command
-allows you to switch the database currently in use by the connection. That
-database remains selected until another is selected or until the connection is
-closed. This creates an issue in that connections could be returned to the pool
-that are connected to a different database.
-
-As a result, redis-py does not implement the SELECT command on client
-instances. If you use multiple Redis databases within the same application, you
-should create a separate client instance (and possibly a separate connection
-pool) for each database.
-
-It is not safe to pass PubSub or Pipeline objects between threads.
-
-Pipelines
-^^^^^^^^^
-
-Pipelines are a subclass of the base Redis class that provide support for
-buffering multiple commands to the server in a single request. They can be used
-to dramatically increase the performance of groups of commands by reducing the
-number of back-and-forth TCP packets between the client and server.
-
-Pipelines are quite simple to use:
-
-.. code-block:: pycon
-
- >>> r = redis.Redis(...)
- >>> r.set('bing', 'baz')
- >>> # Use the pipeline() method to create a pipeline instance
- >>> pipe = r.pipeline()
- >>> # The following SET commands are buffered
- >>> pipe.set('foo', 'bar')
- >>> pipe.get('bing')
- >>> # the EXECUTE call sends all buffered commands to the server, returning
- >>> # a list of responses, one for each command.
- >>> pipe.execute()
- [True, 'baz']
-
-For ease of use, all commands being buffered into the pipeline return the
-pipeline object itself. Therefore calls can be chained like:
-
-.. code-block:: pycon
-
- >>> pipe.set('foo', 'bar').sadd('faz', 'baz').incr('auto_number').execute()
- [True, True, 6]
-
-In addition, pipelines can also ensure the buffered commands are executed
-atomically as a group. This happens by default. If you want to disable the
-atomic nature of a pipeline but still want to buffer commands, you can turn
-off transactions.
-
-.. code-block:: pycon
-
- >>> pipe = r.pipeline(transaction=False)
-
-A common issue occurs when requiring atomic transactions but needing to
-retrieve values in Redis prior for use within the transaction. For instance,
-let's assume that the INCR command didn't exist and we need to build an atomic
-version of INCR in Python.
-
-The completely naive implementation could GET the value, increment it in
-Python, and SET the new value back. However, this is not atomic because
-multiple clients could be doing this at the same time, each getting the same
-value from GET.
-
-Enter the WATCH command. WATCH provides the ability to monitor one or more keys
-prior to starting a transaction. If any of those keys change prior the
-execution of that transaction, the entire transaction will be canceled and a
-WatchError will be raised. To implement our own client-side INCR command, we
-could do something like this:
-
-.. code-block:: pycon
-
- >>> with r.pipeline() as pipe:
- ... while 1:
- ... try:
- ... # put a WATCH on the key that holds our sequence value
- ... pipe.watch('OUR-SEQUENCE-KEY')
- ... # after WATCHing, the pipeline is put into immediate execution
- ... # mode until we tell it to start buffering commands again.
- ... # this allows us to get the current value of our sequence
- ... current_value = pipe.get('OUR-SEQUENCE-KEY')
- ... next_value = int(current_value) + 1
- ... # now we can put the pipeline back into buffered mode with MULTI
- ... pipe.multi()
- ... pipe.set('OUR-SEQUENCE-KEY', next_value)
- ... # and finally, execute the pipeline (the set command)
- ... pipe.execute()
- ... # if a WatchError wasn't raised during execution, everything
- ... # we just did happened atomically.
- ... break
- ... except WatchError:
- ... # another client must have changed 'OUR-SEQUENCE-KEY' between
- ... # the time we started WATCHing it and the pipeline's execution.
- ... # our best bet is to just retry.
- ... continue
-
-Note that, because the Pipeline must bind to a single connection for the
-duration of a WATCH, care must be taken to ensure that the connection is
-returned to the connection pool by calling the reset() method. If the
-Pipeline is used as a context manager (as in the example above) reset()
-will be called automatically. Of course you can do this the manual way by
-explicity calling reset():
-
-.. code-block:: pycon
-
- >>> pipe = r.pipeline()
- >>> while 1:
- ... try:
- ... pipe.watch('OUR-SEQUENCE-KEY')
- ... ...
- ... pipe.execute()
- ... break
- ... except WatchError:
- ... continue
- ... finally:
- ... pipe.reset()
-
-A convenience method named "transaction" exists for handling all the
-boilerplate of handling and retrying watch errors. It takes a callable that
-should expect a single parameter, a pipeline object, and any number of keys to
-be WATCHed. Our client-side INCR command above can be written like this,
-which is much easier to read:
-
-.. code-block:: pycon
-
- >>> def client_side_incr(pipe):
- ... current_value = pipe.get('OUR-SEQUENCE-KEY')
- ... next_value = int(current_value) + 1
- ... pipe.multi()
- ... pipe.set('OUR-SEQUENCE-KEY', next_value)
- >>>
- >>> r.transaction(client_side_incr, 'OUR-SEQUENCE-KEY')
- [True]
-
-Publish / Subscribe
-^^^^^^^^^^^^^^^^^^^
-
-redis-py includes a `PubSub` object that subscribes to channels and listens
-for new messages. Creating a `PubSub` object is easy.
-
-.. code-block:: pycon
-
- >>> r = redis.StrictRedis(...)
- >>> p = r.pubsub()
-
-Once a `PubSub` instance is created, channels and patterns can be subscribed
-to.
-
-.. code-block:: pycon
-
- >>> p.subscribe('my-first-channel', 'my-second-channel', ...)
- >>> p.psubscribe('my-*', ...)
-
-The `PubSub` instance is now subscribed to those channels/patterns. The
-subscription confirmations can be seen by reading messages from the `PubSub`
-instance.
-
-.. code-block:: pycon
-
- >>> p.get_message()
- {'pattern': None, 'type': 'subscribe', 'channel': 'my-second-channel', 'data': 1L}
- >>> p.get_message()
- {'pattern': None, 'type': 'subscribe', 'channel': 'my-first-channel', 'data': 2L}
- >>> p.get_message()
- {'pattern': None, 'type': 'psubscribe', 'channel': 'my-*', 'data': 3L}
-
-Every message read from a `PubSub` instance will be a dictionary with the
-following keys.
-
-* **type**: One of the following: 'subscribe', 'unsubscribe', 'psubscribe',
- 'punsubscribe', 'message', 'pmessage'
-* **channel**: The channel [un]subscribed to or the channel a message was
- published to
-* **pattern**: The pattern that matched a published message's channel. Will be
- `None` in all cases except for 'pmessage' types.
-* **data**: The message data. With [un]subscribe messages, this value will be
- the number of channels and patterns the connection is currently subscribed
- to. With [p]message messages, this value will be the actual published
- message.
-
-Let's send a message now.
-
-.. code-block:: pycon
-
- # the publish method returns the number matching channel and pattern
- # subscriptions. 'my-first-channel' matches both the 'my-first-channel'
- # subscription and the 'my-*' pattern subscription, so this message will
- # be delivered to 2 channels/patterns
- >>> r.publish('my-first-channel', 'some data')
- 2
- >>> p.get_message()
- {'channel': 'my-first-channel', 'data': 'some data', 'pattern': None, 'type': 'message'}
- >>> p.get_message()
- {'channel': 'my-first-channel', 'data': 'some data', 'pattern': 'my-*', 'type': 'pmessage'}
-
-Unsubscribing works just like subscribing. If no arguments are passed to
-[p]unsubscribe, all channels or patterns will be unsubscribed from.
-
-.. code-block:: pycon
-
- >>> p.unsubscribe()
- >>> p.punsubscribe('my-*')
- >>> p.get_message()
- {'channel': 'my-second-channel', 'data': 2L, 'pattern': None, 'type': 'unsubscribe'}
- >>> p.get_message()
- {'channel': 'my-first-channel', 'data': 1L, 'pattern': None, 'type': 'unsubscribe'}
- >>> p.get_message()
- {'channel': 'my-*', 'data': 0L, 'pattern': None, 'type': 'punsubscribe'}
-
-redis-py also allows you to register callback functions to handle published
-messages. Message handlers take a single argument, the message, which is a
-dictionary just like the examples above. To subscribe to a channel or pattern
-with a message handler, pass the channel or pattern name as a keyword argument
-with its value being the callback function.
-
-When a message is read on a channel or pattern with a message handler, the
-message dictionary is created and passed to the message handler. In this case,
-a `None` value is returned from get_message() since the message was already
-handled.
-
-.. code-block:: pycon
-
- >>> def my_handler(message):
- ... print 'MY HANDLER: ', message['data']
- >>> p.subscribe(**{'my-channel': my_handler})
- # read the subscribe confirmation message
- >>> p.get_message()
- {'pattern': None, 'type': 'subscribe', 'channel': 'my-channel', 'data': 1L}
- >>> r.publish('my-channel', 'awesome data')
- 1
- # for the message handler to work, we need tell the instance to read data.
- # this can be done in several ways (read more below). we'll just use
- # the familiar get_message() function for now
- >>> message = p.get_message()
- MY HANDLER: awesome data
- # note here that the my_handler callback printed the string above.
- # `message` is None because the message was handled by our handler.
- >>> print message
- None
-
-If your application is not interested in the (sometimes noisy)
-subscribe/unsubscribe confirmation messages, you can ignore them by passing
-`ignore_subscribe_messages=True` to `r.pubsub()`. This will cause all
-subscribe/unsubscribe messages to be read, but they won't bubble up to your
-application.
-
-.. code-block:: pycon
-
- >>> p = r.pubsub(ignore_subscribe_messages=True)
- >>> p.subscribe('my-channel')
- >>> p.get_message() # hides the subscribe message and returns None
- >>> r.publish('my-channel')
- 1
- >>> p.get_message()
- {'channel': 'my-channel', data': 'my data', 'pattern': None, 'type': 'message'}
-
-There are three different strategies for reading messages.
-
-The examples above have been using `pubsub.get_message()`. Behind the scenes,
-`get_message()` uses the system's 'select' module to quickly poll the
-connection's socket. If there's data available to be read, `get_message()` will
-read it, format the message and return it or pass it to a message handler. If
-there's no data to be read, `get_message()` will immediately return None. This
-makes it trivial to integrate into an existing event loop inside your
-application.
-
-.. code-block:: pycon
-
- >>> while True:
- >>> message = p.get_message()
- >>> if message:
- >>> # do something with the message
- >>> time.sleep(0.001) # be nice to the system :)
-
-Older versions of redis-py only read messages with `pubsub.listen()`. listen()
-is a generator that blocks until a message is available. If your application
-doesn't need to do anything else but receive and act on messages received from
-redis, listen() is an easy way to get up an running.
-
-.. code-block:: pycon
-
- >>> for message in p.listen():
- ... # do something with the message
-
-The third option runs an event loop in a separate thread.
-`pubsub.run_in_thread()` creates a new thread and starts the event loop. The
-thread object is returned to the caller of `run_in_thread()`. The caller can
-use the `thread.stop()` method to shut down the event loop and thread. Behind
-the scenes, this is simply a wrapper around `get_message()` that runs in a
-separate thread, essentially creating a tiny non-blocking event loop for you.
-`run_in_thread()` takes an optional `sleep_time` argument. If specified, the
-event loop will call `time.sleep()` with the value in each iteration of the
-loop.
-
-Note: Since we're running in a separate thread, there's no way to handle
-messages that aren't automatically handled with registered message handlers.
-Therefore, redis-py prevents you from calling `run_in_thread()` if you're
-subscribed to patterns or channels that don't have message handlers attached.
-
-.. code-block:: pycon
-
- >>> p.subscribe(**{'my-channel': my_handler})
- >>> thread = p.run_in_thread(sleep_time=0.001)
- # the event loop is now running in the background processing messages
- # when it's time to shut it down...
- >>> thread.stop()
-
-A PubSub object adheres to the same encoding semantics as the client instance
-it was created from. Any channel or pattern that's unicode will be encoded
-using the `charset` specified on the client before being sent to Redis. If the
-client's `decode_responses` flag is set the False (the default), the
-'channel', 'pattern' and 'data' values in message dictionaries will be byte
-strings (str on Python 2, bytes on Python 3). If the client's
-`decode_responses` is True, then the 'channel', 'pattern' and 'data' values
-will be automatically decoded to unicode strings using the client's `charset`.
-
-PubSub objects remember what channels and patterns they are subscribed to. In
-the event of a disconnection such as a network error or timeout, the
-PubSub object will re-subscribe to all prior channels and patterns when
-reconnecting. Messages that were published while the client was disconnected
-cannot be delivered. When you're finished with a PubSub object, call its
-`.close()` method to shutdown the connection.
-
-.. code-block:: pycon
-
- >>> p = r.pubsub()
- >>> ...
- >>> p.close()
-
-LUA Scripting
-^^^^^^^^^^^^^
-
-redis-py supports the EVAL, EVALSHA, and SCRIPT commands. However, there are
-a number of edge cases that make these commands tedious to use in real world
-scenarios. Therefore, redis-py exposes a Script object that makes scripting
-much easier to use.
-
-To create a Script instance, use the `register_script` function on a client
-instance passing the LUA code as the first argument. `register_script` returns
-a Script instance that you can use throughout your code.
-
-The following trivial LUA script accepts two parameters: the name of a key and
-a multiplier value. The script fetches the value stored in the key, multiplies
-it with the multiplier value and returns the result.
-
-.. code-block:: pycon
-
- >>> r = redis.StrictRedis()
- >>> lua = """
- ... local value = redis.call('GET', KEYS[1])
- ... value = tonumber(value)
- ... return value * ARGV[1]"""
- >>> multiply = r.register_script(lua)
-
-`multiply` is now a Script instance that is invoked by calling it like a
-function. Script instances accept the following optional arguments:
-
-* **keys**: A list of key names that the script will access. This becomes the
- KEYS list in LUA.
-* **args**: A list of argument values. This becomes the ARGV list in LUA.
-* **client**: A redis-py Client or Pipeline instance that will invoke the
- script. If client isn't specified, the client that intiially
- created the Script instance (the one that `register_script` was
- invoked from) will be used.
-
-Continuing the example from above:
-
-.. code-block:: pycon
-
- >>> r.set('foo', 2)
- >>> multiply(keys=['foo'], args=[5])
- 10
-
-The value of key 'foo' is set to 2. When multiply is invoked, the 'foo' key is
-passed to the script along with the multiplier value of 5. LUA executes the
-script and returns the result, 10.
-
-Script instances can be executed using a different client instance, even one
-that points to a completely different Redis server.
-
-.. code-block:: pycon
-
- >>> r2 = redis.StrictRedis('redis2.example.com')
- >>> r2.set('foo', 3)
- >>> multiply(keys=['foo'], args=[5], client=r2)
- 15
-
-The Script object ensures that the LUA script is loaded into Redis's script
-cache. In the event of a NOSCRIPT error, it will load the script and retry
-executing it.
-
-Script objects can also be used in pipelines. The pipeline instance should be
-passed as the client argument when calling the script. Care is taken to ensure
-that the script is registered in Redis's script cache just prior to pipeline
-execution.
-
-.. code-block:: pycon
-
- >>> pipe = r.pipeline()
- >>> pipe.set('foo', 5)
- >>> multiply(keys=['foo'], args=[5], client=pipe)
- >>> pipe.execute()
- [True, 25]
-
-Sentinel support
-^^^^^^^^^^^^^^^^
-
-redis-py can be used together with `Redis Sentinel `_
-to discover Redis nodes. You need to have at least one Sentinel daemon running
-in order to use redis-py's Sentinel support.
-
-Connecting redis-py to the Sentinel instance(s) is easy. You can use a
-Sentinel connection to discover the master and slaves network addresses:
-
-.. code-block:: pycon
-
- >>> from redis.sentinel import Sentinel
- >>> sentinel = Sentinel([('localhost', 26379)], socket_timeout=0.1)
- >>> sentinel.discover_master('mymaster')
- ('127.0.0.1', 6379)
- >>> sentinel.discover_slaves('mymaster')
- [('127.0.0.1', 6380)]
-
-You can also create Redis client connections from a Sentinel instance. You can
-connect to either the master (for write operations) or a slave (for read-only
-operations).
-
-.. code-block:: pycon
-
- >>> master = sentinel.master_for('mymaster', socket_timeout=0.1)
- >>> slave = sentinel.slave_for('mymaster', socket_timeout=0.1)
- >>> master.set('foo', 'bar')
- >>> slave.get('foo')
- 'bar'
-
-The master and slave objects are normal StrictRedis instances with their
-connection pool bound to the Sentinel instance. When a Sentinel backed client
-attempts to establish a connection, it first queries the Sentinel servers to
-determine an appropriate host to connect to. If no server is found,
-a MasterNotFoundError or SlaveNotFoundError is raised. Both exceptions are
-subclasses of ConnectionError.
-
-When trying to connect to a slave client, the Sentinel connection pool will
-iterate over the list of slaves until it finds one that can be connected to.
-If no slaves can be connected to, a connection will be established with the
-master.
-
-See `Guidelines for Redis clients with support for Redis Sentinel
-`_ to learn more about Redis Sentinel.
-
-Scan Iterators
-^^^^^^^^^^^^^^
-
-The \*SCAN commands introduced in Redis 2.8 can be cumbersome to use. While
-these commands are fully supported, redis-py also exposes the following methods
-that return Python iterators for convenience: `scan_iter`, `hscan_iter`,
-`sscan_iter` and `zscan_iter`.
-
-.. code-block:: pycon
-
- >>> for key, value in (('A', '1'), ('B', '2'), ('C', '3')):
- ... r.set(key, value)
- >>> for key in r.scan_iter():
- ... print key, r.get(key)
- A 1
- B 2
- C 3
-
-Author
-^^^^^^
-
-redis-py is developed and maintained by Andy McCurdy (sedrik@gmail.com).
-It can be found here: http://github.com/andymccurdy/redis-py
-
-Special thanks to:
-
-* Ludovico Magnocavallo, author of the original Python Redis client, from
- which some of the socket code is still used.
-* Alexander Solovyov for ideas on the generic response callback system.
-* Paul Hubbard for initial packaging support.
-
diff --git a/benchmarks/base.py b/benchmarks/base.py
index a97001f234..f52657f072 100644
--- a/benchmarks/base.py
+++ b/benchmarks/base.py
@@ -1,12 +1,12 @@
import functools
import itertools
-import redis
import sys
import timeit
-from redis._compat import izip
+
+import redis
-class Benchmark(object):
+class Benchmark:
ARGUMENTS = ()
def __init__(self):
@@ -16,12 +16,10 @@ def get_client(self, **kwargs):
# eventually make this more robust and take optional args from
# argparse
if self._client is None or kwargs:
- defaults = {
- 'db': 9
- }
+ defaults = {"db": 9}
defaults.update(kwargs)
         pool = redis.ConnectionPool(**defaults)
- self._client = redis.StrictRedis(connection_pool=pool)
+ self._client = redis.Redis(connection_pool=pool)
return self._client
def setup(self, **kwargs):
@@ -31,16 +29,16 @@ def run(self, **kwargs):
pass
def run_benchmark(self):
- group_names = [group['name'] for group in self.ARGUMENTS]
- group_values = [group['values'] for group in self.ARGUMENTS]
+ group_names = [group["name"] for group in self.ARGUMENTS]
+ group_values = [group["values"] for group in self.ARGUMENTS]
for value_set in itertools.product(*group_values):
- pairs = list(izip(group_names, value_set))
- arg_string = ', '.join(['%s=%s' % (p[0], p[1]) for p in pairs])
- sys.stdout.write('Benchmark: %s... ' % arg_string)
+ pairs = list(zip(group_names, value_set))
+ arg_string = ", ".join(f"{p[0]}={p[1]}" for p in pairs)
+ sys.stdout.write(f"Benchmark: {arg_string}... ")
sys.stdout.flush()
kwargs = dict(pairs)
setup = functools.partial(self.setup, **kwargs)
run = functools.partial(self.run, **kwargs)
t = timeit.timeit(stmt=run, setup=setup, number=1000)
- sys.stdout.write('%f\n' % t)
+ sys.stdout.write(f"{t:f}\n")
sys.stdout.flush()
diff --git a/benchmarks/basic_operations.py b/benchmarks/basic_operations.py
new file mode 100644
index 0000000000..c9f5853652
--- /dev/null
+++ b/benchmarks/basic_operations.py
@@ -0,0 +1,193 @@
+import time
+from argparse import ArgumentParser
+from functools import wraps
+
+import redis
+
+
+def parse_args():
+ parser = ArgumentParser()
+ parser.add_argument(
+ "-n", type=int, help="Total number of requests (default 100000)", default=100000
+ )
+ parser.add_argument(
+ "-P",
+ type=int,
+ help=("Pipeline requests. Default 1 (no pipeline)."),
+ default=1,
+ )
+ parser.add_argument(
+ "-s",
+ type=int,
+ help="Data size of SET/GET value in bytes (default 2)",
+ default=2,
+ )
+
+ args = parser.parse_args()
+ return args
+
+
+def run():
+ args = parse_args()
+ r = redis.Redis()
+ r.flushall()
+ set_str(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s)
+ set_int(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s)
+ get_str(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s)
+ get_int(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s)
+ incr(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s)
+ lpush(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s)
+ lrange_300(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s)
+ lpop(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s)
+ hmset(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s)
+
+
+def timer(func):
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ start = time.monotonic()
+ ret = func(*args, **kwargs)
+ duration = time.monotonic() - start
+ if "num" in kwargs:
+ count = kwargs["num"]
+ else:
+ count = args[1]
+ print(f"{func.__name__} - {count} Requests")
+ print(f"Duration = {duration}")
+ print(f"Rate = {count/duration}")
+ print()
+ return ret
+
+ return wrapper
+
+
+@timer
+def set_str(conn, num, pipeline_size, data_size):
+ if pipeline_size > 1:
+ conn = conn.pipeline()
+
+ set_data = "a".ljust(data_size, "0")
+ for i in range(num):
+ conn.set(f"set_str:{i}", set_data)
+ if pipeline_size > 1 and i % pipeline_size == 0:
+ conn.execute()
+
+ if pipeline_size > 1:
+ conn.execute()
+
+
+@timer
+def set_int(conn, num, pipeline_size, data_size):
+ if pipeline_size > 1:
+ conn = conn.pipeline()
+
+ set_data = 10 ** (data_size - 1)
+ for i in range(num):
+ conn.set(f"set_int:{i}", set_data)
+ if pipeline_size > 1 and i % pipeline_size == 0:
+ conn.execute()
+
+ if pipeline_size > 1:
+ conn.execute()
+
+
+@timer
+def get_str(conn, num, pipeline_size, data_size):
+ if pipeline_size > 1:
+ conn = conn.pipeline()
+
+ for i in range(num):
+ conn.get(f"set_str:{i}")
+ if pipeline_size > 1 and i % pipeline_size == 0:
+ conn.execute()
+
+ if pipeline_size > 1:
+ conn.execute()
+
+
+@timer
+def get_int(conn, num, pipeline_size, data_size):
+ if pipeline_size > 1:
+ conn = conn.pipeline()
+
+ for i in range(num):
+ conn.get(f"set_int:{i}")
+ if pipeline_size > 1 and i % pipeline_size == 0:
+ conn.execute()
+
+ if pipeline_size > 1:
+ conn.execute()
+
+
+@timer
+def incr(conn, num, pipeline_size, *args, **kwargs):
+ if pipeline_size > 1:
+ conn = conn.pipeline()
+
+ for i in range(num):
+ conn.incr("incr_key")
+ if pipeline_size > 1 and i % pipeline_size == 0:
+ conn.execute()
+
+ if pipeline_size > 1:
+ conn.execute()
+
+
+@timer
+def lpush(conn, num, pipeline_size, data_size):
+ if pipeline_size > 1:
+ conn = conn.pipeline()
+
+ set_data = 10 ** (data_size - 1)
+ for i in range(num):
+ conn.lpush("lpush_key", set_data)
+ if pipeline_size > 1 and i % pipeline_size == 0:
+ conn.execute()
+
+ if pipeline_size > 1:
+ conn.execute()
+
+
+@timer
+def lrange_300(conn, num, pipeline_size, data_size):
+ if pipeline_size > 1:
+ conn = conn.pipeline()
+
+ for i in range(num):
+ conn.lrange("lpush_key", i, i + 300)
+ if pipeline_size > 1 and i % pipeline_size == 0:
+ conn.execute()
+
+ if pipeline_size > 1:
+ conn.execute()
+
+
+@timer
+def lpop(conn, num, pipeline_size, data_size):
+ if pipeline_size > 1:
+ conn = conn.pipeline()
+ for i in range(num):
+ conn.lpop("lpush_key")
+ if pipeline_size > 1 and i % pipeline_size == 0:
+ conn.execute()
+ if pipeline_size > 1:
+ conn.execute()
+
+
+@timer
+def hmset(conn, num, pipeline_size, data_size):
+ if pipeline_size > 1:
+ conn = conn.pipeline()
+
+ set_data = {"str_value": "string", "int_value": 123456, "float_value": 123456.0}
+ for i in range(num):
+ conn.hmset("hmset_key", set_data)
+ if pipeline_size > 1 and i % pipeline_size == 0:
+ conn.execute()
+
+ if pipeline_size > 1:
+ conn.execute()
+
+
+if __name__ == "__main__":
+ run()
diff --git a/benchmarks/cluster_async.py b/benchmarks/cluster_async.py
new file mode 100644
index 0000000000..17dd52b5df
--- /dev/null
+++ b/benchmarks/cluster_async.py
@@ -0,0 +1,263 @@
+import asyncio
+import functools
+import time
+
+import aioredis_cluster
+import aredis
+import uvloop
+
+import redis.asyncio as redispy
+
+
+def timer(func):
+ @functools.wraps(func)
+ async def wrapper(*args, **kwargs):
+ tic = time.perf_counter()
+ await func(*args, **kwargs)
+ toc = time.perf_counter()
+ return f"{toc - tic:.4f}"
+
+ return wrapper
+
+
+@timer
+async def set_str(client, gather, data):
+ if gather:
+ for _ in range(count // 100):
+ await asyncio.gather(
+ *(
+ asyncio.create_task(client.set(f"bench:str_{i}", data))
+ for i in range(100)
+ )
+ )
+ else:
+ for i in range(count):
+ await client.set(f"bench:str_{i}", data)
+
+
+@timer
+async def set_int(client, gather, data):
+ if gather:
+ for _ in range(count // 100):
+ await asyncio.gather(
+ *(
+ asyncio.create_task(client.set(f"bench:int_{i}", data))
+ for i in range(100)
+ )
+ )
+ else:
+ for i in range(count):
+ await client.set(f"bench:int_{i}", data)
+
+
+@timer
+async def get_str(client, gather):
+ if gather:
+ for _ in range(count // 100):
+ await asyncio.gather(
+ *(asyncio.create_task(client.get(f"bench:str_{i}")) for i in range(100))
+ )
+ else:
+ for i in range(count):
+ await client.get(f"bench:str_{i}")
+
+
+@timer
+async def get_int(client, gather):
+ if gather:
+ for _ in range(count // 100):
+ await asyncio.gather(
+ *(asyncio.create_task(client.get(f"bench:int_{i}")) for i in range(100))
+ )
+ else:
+ for i in range(count):
+ await client.get(f"bench:int_{i}")
+
+
+@timer
+async def hset(client, gather, data):
+ if gather:
+ for _ in range(count // 100):
+ await asyncio.gather(
+ *(
+ asyncio.create_task(client.hset("bench:hset", str(i), data))
+ for i in range(100)
+ )
+ )
+ else:
+ for i in range(count):
+ await client.hset("bench:hset", str(i), data)
+
+
+@timer
+async def hget(client, gather):
+ if gather:
+ for _ in range(count // 100):
+ await asyncio.gather(
+ *(
+ asyncio.create_task(client.hget("bench:hset", str(i)))
+ for i in range(100)
+ )
+ )
+ else:
+ for i in range(count):
+ await client.hget("bench:hset", str(i))
+
+
+@timer
+async def incr(client, gather):
+ if gather:
+ for _ in range(count // 100):
+ await asyncio.gather(
+ *(asyncio.create_task(client.incr("bench:incr")) for i in range(100))
+ )
+ else:
+ for i in range(count):
+ await client.incr("bench:incr")
+
+
+@timer
+async def lpush(client, gather, data):
+ if gather:
+ for _ in range(count // 100):
+ await asyncio.gather(
+ *(
+ asyncio.create_task(client.lpush("bench:lpush", data))
+ for i in range(100)
+ )
+ )
+ else:
+ for i in range(count):
+ await client.lpush("bench:lpush", data)
+
+
+@timer
+async def lrange_300(client, gather):
+ if gather:
+ for _ in range(count // 100):
+ await asyncio.gather(
+ *(
+ asyncio.create_task(client.lrange("bench:lpush", i, i + 300))
+ for i in range(100)
+ )
+ )
+ else:
+ for i in range(count):
+ await client.lrange("bench:lpush", i, i + 300)
+
+
+@timer
+async def lpop(client, gather):
+ if gather:
+ for _ in range(count // 100):
+ await asyncio.gather(
+ *(asyncio.create_task(client.lpop("bench:lpush")) for i in range(100))
+ )
+ else:
+ for i in range(count):
+ await client.lpop("bench:lpush")
+
+
+@timer
+async def warmup(client):
+ await asyncio.gather(
+ *(asyncio.create_task(client.exists(f"bench:warmup_{i}")) for i in range(100))
+ )
+
+
+@timer
+async def run(client, gather):
+ data_str = "a" * size
+ data_int = int("1" * size)
+
+ if gather is False:
+ for ret in await asyncio.gather(
+ asyncio.create_task(set_str(client, gather, data_str)),
+ asyncio.create_task(set_int(client, gather, data_int)),
+ asyncio.create_task(hset(client, gather, data_str)),
+ asyncio.create_task(incr(client, gather)),
+ asyncio.create_task(lpush(client, gather, data_int)),
+ ):
+ print(ret)
+ for ret in await asyncio.gather(
+ asyncio.create_task(get_str(client, gather)),
+ asyncio.create_task(get_int(client, gather)),
+ asyncio.create_task(hget(client, gather)),
+ asyncio.create_task(lrange_300(client, gather)),
+ asyncio.create_task(lpop(client, gather)),
+ ):
+ print(ret)
+ else:
+ print(await set_str(client, gather, data_str))
+ print(await set_int(client, gather, data_int))
+ print(await hset(client, gather, data_str))
+ print(await incr(client, gather))
+ print(await lpush(client, gather, data_int))
+
+ print(await get_str(client, gather))
+ print(await get_int(client, gather))
+ print(await hget(client, gather))
+ print(await lrange_300(client, gather))
+ print(await lpop(client, gather))
+
+
+async def main(loop, gather=None):
+ arc = aredis.StrictRedisCluster(
+ host=host,
+ port=port,
+ password=password,
+ max_connections=2**31,
+ max_connections_per_node=2**31,
+ readonly=False,
+ reinitialize_steps=count,
+ skip_full_coverage_check=True,
+ decode_responses=False,
+ max_idle_time=count,
+ idle_check_interval=count,
+ )
+ print(f"{loop} {gather} {await warmup(arc)} aredis")
+ print(await run(arc, gather=gather))
+ arc.connection_pool.disconnect()
+
+ aiorc = await aioredis_cluster.create_redis_cluster(
+ [(host, port)],
+ password=password,
+ state_reload_interval=count,
+ idle_connection_timeout=count,
+ pool_maxsize=2**31,
+ )
+ print(f"{loop} {gather} {await warmup(aiorc)} aioredis-cluster")
+ print(await run(aiorc, gather=gather))
+ aiorc.close()
+ await aiorc.wait_closed()
+
+ async with redispy.RedisCluster(
+ host=host,
+ port=port,
+ password=password,
+ reinitialize_steps=count,
+ read_from_replicas=False,
+ decode_responses=False,
+ max_connections=2**31,
+ ) as rca:
+ print(f"{loop} {gather} {await warmup(rca)} redispy")
+ print(await run(rca, gather=gather))
+
+
+if __name__ == "__main__":
+ host = "localhost"
+ port = 16379
+ password = None
+
+ count = 10000
+ size = 256
+
+ asyncio.run(main("asyncio"))
+ asyncio.run(main("asyncio", gather=False))
+ asyncio.run(main("asyncio", gather=True))
+
+ uvloop.install()
+
+ asyncio.run(main("uvloop"))
+ asyncio.run(main("uvloop", gather=False))
+ asyncio.run(main("uvloop", gather=True))
diff --git a/benchmarks/cluster_async_pipeline.py b/benchmarks/cluster_async_pipeline.py
new file mode 100644
index 0000000000..af45b44511
--- /dev/null
+++ b/benchmarks/cluster_async_pipeline.py
@@ -0,0 +1,107 @@
+import asyncio
+import functools
+import time
+
+import aioredis_cluster
+import aredis
+import uvloop
+
+import redis.asyncio as redispy
+
+
+def timer(func):
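+    # Time the wrapped coroutine and return the elapsed wall-clock seconds
+    # as a formatted string (the coroutine's own result is discarded).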
+ @functools.wraps(func)
+ async def wrapper(*args, **kwargs):
+ tic = time.perf_counter()
+ await func(*args, **kwargs)
+ toc = time.perf_counter()
+ return f"{toc - tic:.4f}"
+
+ return wrapper
+
+
+@timer
+async def warmup(client):
+ await asyncio.gather(
+ *(asyncio.create_task(client.exists(f"bench:warmup_{i}")) for i in range(100))
+ )
+
+
+@timer
+async def run(client):
+ data_str = "a" * size
+ data_int = int("1" * size)
+
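+    # Each iteration buffers ten commands into one pipeline and flushes
+    # them to the server with a single execute() call.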
+ for i in range(count):
+ with client.pipeline() as pipe:
+ await (
+ pipe.set(f"bench:str_{i}", data_str)
+ .set(f"bench:int_{i}", data_int)
+ .get(f"bench:str_{i}")
+ .get(f"bench:int_{i}")
+ .hset("bench:hset", str(i), data_str)
+ .hget("bench:hset", str(i))
+ .incr("bench:incr")
+ .lpush("bench:lpush", data_int)
+ .lrange("bench:lpush", 0, 300)
+ .lpop("bench:lpush")
+ .execute()
+ )
+
+
+async def main(loop):
+ arc = aredis.StrictRedisCluster(
+ host=host,
+ port=port,
+ password=password,
+ max_connections=2**31,
+ max_connections_per_node=2**31,
+ readonly=False,
+ reinitialize_steps=count,
+ skip_full_coverage_check=True,
+ decode_responses=False,
+ max_idle_time=count,
+ idle_check_interval=count,
+ )
+ print(f"{loop} {await warmup(arc)} aredis")
+ print(await run(arc))
+ arc.connection_pool.disconnect()
+
+ aiorc = await aioredis_cluster.create_redis_cluster(
+ [(host, port)],
+ password=password,
+ state_reload_interval=count,
+ idle_connection_timeout=count,
+ pool_maxsize=2**31,
+ )
+ print(f"{loop} {await warmup(aiorc)} aioredis-cluster")
+ print(await run(aiorc))
+ aiorc.close()
+ await aiorc.wait_closed()
+
+ async with redispy.RedisCluster(
+ host=host,
+ port=port,
+ password=password,
+ reinitialize_steps=count,
+ read_from_replicas=False,
+ decode_responses=False,
+ max_connections=2**31,
+ ) as rca:
+ print(f"{loop} {await warmup(rca)} redispy")
+ print(await run(rca))
+
+
+if __name__ == "__main__":
+ host = "localhost"
+ port = 16379
+ password = None
+
+ count = 10000
+ size = 256
+
+ asyncio.run(main("asyncio"))
+
+ uvloop.install()
+
+ asyncio.run(main("uvloop"))
diff --git a/benchmarks/command_packer_benchmark.py b/benchmarks/command_packer_benchmark.py
index 13d6f97761..e66dbbcbf9 100644
--- a/benchmarks/command_packer_benchmark.py
+++ b/benchmarks/command_packer_benchmark.py
@@ -1,43 +1,44 @@
-import socket
-import sys
-from redis.connection import (Connection, SYM_STAR, SYM_DOLLAR, SYM_EMPTY,
- SYM_CRLF, b)
-from redis._compat import imap
from base import Benchmark
+from redis.connection import SYM_CRLF, SYM_DOLLAR, SYM_EMPTY, SYM_STAR, Connection
+
class StringJoiningConnection(Connection):
- def send_packed_command(self, command):
+ def send_packed_command(self, command, check_health=True):
"Send an already packed command to the Redis server"
if not self._sock:
self.connect()
try:
self._sock.sendall(command)
- except socket.error:
- e = sys.exc_info()[1]
+ except OSError as e:
self.disconnect()
if len(e.args) == 1:
- _errno, errmsg = 'UNKNOWN', e.args[0]
+ _errno, errmsg = "UNKNOWN", e.args[0]
else:
_errno, errmsg = e.args
- raise ConnectionError("Error %s while writing to socket. %s." %
- (_errno, errmsg))
- except:
+ raise ConnectionError(f"Error {_errno} while writing to socket. {errmsg}.")
+ except Exception:
self.disconnect()
raise
def pack_command(self, *args):
"Pack a series of arguments into a value Redis command"
- args_output = SYM_EMPTY.join([
- SYM_EMPTY.join((SYM_DOLLAR, b(str(len(k))), SYM_CRLF, k, SYM_CRLF))
- for k in imap(self.encode, args)])
+ args_output = SYM_EMPTY.join(
+ [
+ SYM_EMPTY.join(
+ (SYM_DOLLAR, str(len(k)).encode(), SYM_CRLF, k, SYM_CRLF)
+ )
+ for k in map(self.encoder.encode, args)
+ ]
+ )
output = SYM_EMPTY.join(
- (SYM_STAR, b(str(len(args))), SYM_CRLF, args_output))
+ (SYM_STAR, str(len(args)).encode(), SYM_CRLF, args_output)
+ )
return output
class ListJoiningConnection(Connection):
- def send_packed_command(self, command):
+ def send_packed_command(self, command, check_health=True):
if not self._sock:
self.connect()
try:
@@ -45,34 +46,33 @@ def send_packed_command(self, command):
command = [command]
for item in command:
self._sock.sendall(item)
- except socket.error:
- e = sys.exc_info()[1]
+ except OSError as e:
self.disconnect()
if len(e.args) == 1:
- _errno, errmsg = 'UNKNOWN', e.args[0]
+ _errno, errmsg = "UNKNOWN", e.args[0]
else:
_errno, errmsg = e.args
- raise ConnectionError("Error %s while writing to socket. %s." %
- (_errno, errmsg))
- except:
+ raise ConnectionError(f"Error {_errno} while writing to socket. {errmsg}.")
+ except Exception:
self.disconnect()
raise
def pack_command(self, *args):
output = []
- buff = SYM_EMPTY.join(
- (SYM_STAR, b(str(len(args))), SYM_CRLF))
+ buff = SYM_EMPTY.join((SYM_STAR, str(len(args)).encode(), SYM_CRLF))
- for k in imap(self.encode, args):
+ for k in map(self.encoder.encode, args):
if len(buff) > 6000 or len(k) > 6000:
buff = SYM_EMPTY.join(
- (buff, SYM_DOLLAR, b(str(len(k))), SYM_CRLF))
+ (buff, SYM_DOLLAR, str(len(k)).encode(), SYM_CRLF)
+ )
output.append(buff)
output.append(k)
buff = SYM_CRLF
else:
- buff = SYM_EMPTY.join((buff, SYM_DOLLAR, b(str(len(k))),
- SYM_CRLF, k, SYM_CRLF))
+ buff = SYM_EMPTY.join(
+ (buff, SYM_DOLLAR, str(len(k)).encode(), SYM_CRLF, k, SYM_CRLF)
+ )
output.append(buff)
return output
@@ -81,13 +81,12 @@ class CommandPackerBenchmark(Benchmark):
ARGUMENTS = (
{
- 'name': 'connection_class',
- 'values': [StringJoiningConnection, ListJoiningConnection]
+ "name": "connection_class",
+ "values": [StringJoiningConnection, ListJoiningConnection],
},
{
- 'name': 'value_size',
- 'values': [10, 100, 1000, 10000, 100000, 1000000, 10000000,
- 100000000]
+ "name": "value_size",
+ "values": [10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000],
},
)
@@ -96,9 +95,9 @@ def setup(self, connection_class, value_size):
def run(self, connection_class, value_size):
r = self.get_client()
- x = 'a' * value_size
- r.set('benchmark', x)
+ x = "a" * value_size
+ r.set("benchmark", x)
-if __name__ == '__main__':
+if __name__ == "__main__":
CommandPackerBenchmark().run_benchmark()
diff --git a/benchmarks/socket_read_size.py b/benchmarks/socket_read_size.py
index 72a1b0a7e3..3427956ced 100644
--- a/benchmarks/socket_read_size.py
+++ b/benchmarks/socket_read_size.py
@@ -1,34 +1,27 @@
-from redis.connection import PythonParser, HiredisParser
from base import Benchmark
+from redis.connection import HiredisParser, PythonParser
+
class SocketReadBenchmark(Benchmark):
ARGUMENTS = (
+ {"name": "parser", "values": [PythonParser, HiredisParser]},
{
- 'name': 'parser',
- 'values': [PythonParser, HiredisParser]
- },
- {
- 'name': 'value_size',
- 'values': [10, 100, 1000, 10000, 100000, 1000000, 10000000,
- 100000000]
+ "name": "value_size",
+ "values": [10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000],
},
- {
- 'name': 'read_size',
- 'values': [4096, 8192, 16384, 32768, 65536, 131072]
- }
+ {"name": "read_size", "values": [4096, 8192, 16384, 32768, 65536, 131072]},
)
def setup(self, value_size, read_size, parser):
- r = self.get_client(parser_class=parser,
- socket_read_size=read_size)
- r.set('benchmark', 'a' * value_size)
+ r = self.get_client(parser_class=parser, socket_read_size=read_size)
+ r.set("benchmark", "a" * value_size)
def run(self, value_size, read_size, parser):
r = self.get_client()
- r.get('benchmark')
+ r.get("benchmark")
-if __name__ == '__main__':
+if __name__ == "__main__":
SocketReadBenchmark().run_benchmark()
diff --git a/codecov.yml b/codecov.yml
new file mode 100644
index 0000000000..449ec0c50f
--- /dev/null
+++ b/codecov.yml
@@ -0,0 +1,13 @@
+ignore:
+ - "benchmarks/**"
+ - "tasks.py"
+
+codecov:
+ require_ci_to_pass: yes
+
+coverage:
+ precision: 2
+ round: down
+ range: "80...100"
+ status:
+ patch: off # off for now as it yells about everything
diff --git a/dev_requirements.txt b/dev_requirements.txt
new file mode 100644
index 0000000000..8285b0456f
--- /dev/null
+++ b/dev_requirements.txt
@@ -0,0 +1,18 @@
+click==8.0.4
+black==22.3.0
+flake8==5.0.4
+flynt~=0.69.0
+isort==5.10.1
+mock==4.0.3
+packaging>=20.4
+pytest==7.2.0
+pytest-timeout==2.0.1
+pytest-asyncio>=0.20.2
+tox==3.27.1
+tox-docker==3.1.0
+invoke==1.7.3
+pytest-cov>=4.0.0
+vulture>=2.3.0
+ujson>=4.2.0
+wheel>=0.30.0
+uvloop
diff --git a/docker/base/Dockerfile b/docker/base/Dockerfile
new file mode 100644
index 0000000000..c76d15db36
--- /dev/null
+++ b/docker/base/Dockerfile
@@ -0,0 +1,4 @@
+# produces redisfab/redis-py:6.2.6
+FROM redis:6.2.6-buster
+
+CMD ["redis-server", "/redis.conf"]
diff --git a/docker/base/Dockerfile.cluster b/docker/base/Dockerfile.cluster
new file mode 100644
index 0000000000..5c246dcf28
--- /dev/null
+++ b/docker/base/Dockerfile.cluster
@@ -0,0 +1,11 @@
+# produces redisfab/redis-py-cluster:6.2.6
+FROM redis:6.2.6-buster
+
+COPY create_cluster.sh /create_cluster.sh
+RUN chmod +x /create_cluster.sh
+
+EXPOSE 16379 16380 16381 16382 16383 16384
+
+ENV START_PORT=16379
+ENV END_PORT=16384
+CMD /create_cluster.sh
diff --git a/docker/base/Dockerfile.cluster4 b/docker/base/Dockerfile.cluster4
new file mode 100644
index 0000000000..3158d6edd4
--- /dev/null
+++ b/docker/base/Dockerfile.cluster4
@@ -0,0 +1,9 @@
+# produces redisfab/redis-py-cluster:4.0
+FROM redis:4.0-buster
+
+COPY create_cluster4.sh /create_cluster4.sh
+RUN chmod +x /create_cluster4.sh
+
+EXPOSE 16391 16392 16393 16394 16395 16396
+
+CMD [ "/create_cluster4.sh"]
\ No newline at end of file
diff --git a/docker/base/Dockerfile.cluster5 b/docker/base/Dockerfile.cluster5
new file mode 100644
index 0000000000..3becfc853a
--- /dev/null
+++ b/docker/base/Dockerfile.cluster5
@@ -0,0 +1,9 @@
+# produces redisfab/redis-py-cluster:5.0
+FROM redis:5.0-buster
+
+COPY create_cluster5.sh /create_cluster5.sh
+RUN chmod +x /create_cluster5.sh
+
+EXPOSE 16385 16386 16387 16388 16389 16390
+
+CMD [ "/create_cluster5.sh"]
\ No newline at end of file
diff --git a/docker/base/Dockerfile.redis4 b/docker/base/Dockerfile.redis4
new file mode 100644
index 0000000000..7528ac1631
--- /dev/null
+++ b/docker/base/Dockerfile.redis4
@@ -0,0 +1,4 @@
+# produces redisfab/redis-py:4.0
+FROM redis:4.0-buster
+
+CMD ["redis-server", "/redis.conf"]
\ No newline at end of file
diff --git a/docker/base/Dockerfile.redis5 b/docker/base/Dockerfile.redis5
new file mode 100644
index 0000000000..6bcbe20bfc
--- /dev/null
+++ b/docker/base/Dockerfile.redis5
@@ -0,0 +1,4 @@
+# produces redisfab/redis-py:5.0
+FROM redis:5.0-buster
+
+CMD ["redis-server", "/redis.conf"]
\ No newline at end of file
diff --git a/docker/base/Dockerfile.redismod_cluster b/docker/base/Dockerfile.redismod_cluster
new file mode 100644
index 0000000000..5b80e495fb
--- /dev/null
+++ b/docker/base/Dockerfile.redismod_cluster
@@ -0,0 +1,12 @@
+# produces redisfab/redis-py-modcluster:6.2.6
+FROM redislabs/redismod:edge
+
+COPY create_redismod_cluster.sh /create_redismod_cluster.sh
+RUN chmod +x /create_redismod_cluster.sh
+
+EXPOSE 46379 46380 46381 46382 46383 46384
+
+ENV START_PORT=46379
+ENV END_PORT=46384
+ENTRYPOINT []
+CMD /create_redismod_cluster.sh
diff --git a/docker/base/Dockerfile.sentinel b/docker/base/Dockerfile.sentinel
new file mode 100644
index 0000000000..ef659e3004
--- /dev/null
+++ b/docker/base/Dockerfile.sentinel
@@ -0,0 +1,4 @@
+# produces redisfab/redis-py-sentinel:6.2.6
+FROM redis:6.2.6-buster
+
+CMD ["redis-sentinel", "/sentinel.conf"]
diff --git a/docker/base/Dockerfile.sentinel4 b/docker/base/Dockerfile.sentinel4
new file mode 100644
index 0000000000..45bb03e88e
--- /dev/null
+++ b/docker/base/Dockerfile.sentinel4
@@ -0,0 +1,4 @@
+# produces redisfab/redis-py-sentinel:4.0
+FROM redis:4.0-buster
+
+CMD ["redis-sentinel", "/sentinel.conf"]
\ No newline at end of file
diff --git a/docker/base/Dockerfile.sentinel5 b/docker/base/Dockerfile.sentinel5
new file mode 100644
index 0000000000..6958154e46
--- /dev/null
+++ b/docker/base/Dockerfile.sentinel5
@@ -0,0 +1,4 @@
+# produces redisfab/redis-py-sentinel:5.0
+FROM redis:5.0-buster
+
+CMD ["redis-sentinel", "/sentinel.conf"]
\ No newline at end of file
diff --git a/docker/base/Dockerfile.stunnel b/docker/base/Dockerfile.stunnel
new file mode 100644
index 0000000000..bf4510907c
--- /dev/null
+++ b/docker/base/Dockerfile.stunnel
@@ -0,0 +1,11 @@
+# produces redisfab/stunnel:latest
+FROM ubuntu:18.04
+
+RUN apt-get update -qq --fix-missing
+RUN apt-get upgrade -qqy
+RUN apt-get install -qqy stunnel
+RUN mkdir -p /etc/stunnel/conf.d
+RUN echo "foreground = yes\ninclude = /etc/stunnel/conf.d" > /etc/stunnel/stunnel.conf
+RUN chown -R root:root /etc/stunnel/
+
+CMD ["/usr/bin/stunnel"]
diff --git a/docker/base/Dockerfile.unstable b/docker/base/Dockerfile.unstable
new file mode 100644
index 0000000000..ab5b7fc6fb
--- /dev/null
+++ b/docker/base/Dockerfile.unstable
@@ -0,0 +1,18 @@
+# produces redisfab/redis-py:unstable
+FROM ubuntu:bionic as builder
+RUN apt-get update
+RUN apt-get upgrade -y
+RUN apt-get install -y build-essential git
+RUN mkdir /build
+WORKDIR /build
+RUN git clone https://github.com/redis/redis
+WORKDIR /build/redis
+RUN make
+
+FROM ubuntu:bionic as runner
+COPY --from=builder /build/redis/src/redis-server /usr/bin/redis-server
+COPY --from=builder /build/redis/src/redis-cli /usr/bin/redis-cli
+COPY --from=builder /build/redis/src/redis-sentinel /usr/bin/redis-sentinel
+
+EXPOSE 6379
+CMD ["redis-server", "/redis.conf"]
diff --git a/docker/base/Dockerfile.unstable_cluster b/docker/base/Dockerfile.unstable_cluster
new file mode 100644
index 0000000000..2e3ed55371
--- /dev/null
+++ b/docker/base/Dockerfile.unstable_cluster
@@ -0,0 +1,11 @@
+# produces redisfab/redis-py-cluster:6.2.6
+FROM redisfab/redis-py:unstable-bionic
+
+COPY create_cluster.sh /create_cluster.sh
+RUN chmod +x /create_cluster.sh
+
+EXPOSE 6372 6373 6374 6375 6376 6377
+
+ENV START_PORT=6372
+ENV END_PORT=6377
+CMD ["/create_cluster.sh"]
diff --git a/docker/base/Dockerfile.unstable_sentinel b/docker/base/Dockerfile.unstable_sentinel
new file mode 100644
index 0000000000..fe6d062de8
--- /dev/null
+++ b/docker/base/Dockerfile.unstable_sentinel
@@ -0,0 +1,17 @@
+# produces redisfab/redis-py-sentinel:unstable
+FROM ubuntu:bionic as builder
+RUN apt-get update
+RUN apt-get upgrade -y
+RUN apt-get install -y build-essential git
+RUN mkdir /build
+WORKDIR /build
+RUN git clone https://github.com/redis/redis
+WORKDIR /build/redis
+RUN make
+
+FROM ubuntu:bionic as runner
+COPY --from=builder /build/redis/src/redis-server /usr/bin/redis-server
+COPY --from=builder /build/redis/src/redis-cli /usr/bin/redis-cli
+COPY --from=builder /build/redis/src/redis-sentinel /usr/bin/redis-sentinel
+
+CMD ["redis-sentinel", "/sentinel.conf"]
diff --git a/docker/base/README.md b/docker/base/README.md
new file mode 100644
index 0000000000..a2f26a8106
--- /dev/null
+++ b/docker/base/README.md
@@ -0,0 +1 @@
+Docker images in this folder are built and uploaded to the redisfab Docker Hub organization.
diff --git a/docker/base/create_cluster.sh b/docker/base/create_cluster.sh
new file mode 100755
index 0000000000..fcb1b1cd8d
--- /dev/null
+++ b/docker/base/create_cluster.sh
@@ -0,0 +1,46 @@
+#! /bin/bash
+
+mkdir -p /nodes
+touch /nodes/nodemap
+if [ -z "${START_PORT}" ]; then
+    START_PORT=16379
+fi
+if [ -z "${END_PORT}" ]; then
+    END_PORT=16384
+fi
+if [ ! -z "$3" ]; then
+    START_PORT=$2
+    END_PORT=$3
+fi
+echo "STARTING: ${START_PORT}"
+echo "ENDING: ${END_PORT}"
+
+for PORT in `seq ${START_PORT} ${END_PORT}`; do
+ mkdir -p /nodes/$PORT
+ if [[ -e /redis.conf ]]; then
+ cp /redis.conf /nodes/$PORT/redis.conf
+ else
+ touch /nodes/$PORT/redis.conf
+ fi
+ cat << EOF >> /nodes/$PORT/redis.conf
+port ${PORT}
+cluster-enabled yes
+daemonize yes
+logfile /redis.log
+dir /nodes/$PORT
+EOF
+
+ set -x
+ redis-server /nodes/$PORT/redis.conf
+ if [ $? -ne 0 ]; then
+ echo "Redis failed to start, exiting."
+ continue
+ fi
+ echo 127.0.0.1:$PORT >> /nodes/nodemap
+done
+if [ -z "${REDIS_PASSWORD}" ]; then
+ echo yes | redis-cli --cluster create `seq -f 127.0.0.1:%g ${START_PORT} ${END_PORT}` --cluster-replicas 1
+else
+ echo yes | redis-cli -a ${REDIS_PASSWORD} --cluster create `seq -f 127.0.0.1:%g ${START_PORT} ${END_PORT}` --cluster-replicas 1
+fi
+tail -f /redis.log
diff --git a/docker/base/create_cluster4.sh b/docker/base/create_cluster4.sh
new file mode 100755
index 0000000000..a39da58784
--- /dev/null
+++ b/docker/base/create_cluster4.sh
@@ -0,0 +1,26 @@
+#! /bin/bash
+mkdir -p /nodes
+touch /nodes/nodemap
+for PORT in $(seq 16391 16396); do
+ mkdir -p /nodes/$PORT
+ if [[ -e /redis.conf ]]; then
+ cp /redis.conf /nodes/$PORT/redis.conf
+ else
+ touch /nodes/$PORT/redis.conf
+ fi
+ cat << EOF >> /nodes/$PORT/redis.conf
+port ${PORT}
+cluster-enabled yes
+daemonize yes
+logfile /redis.log
+dir /nodes/$PORT
+EOF
+ redis-server /nodes/$PORT/redis.conf
+ if [ $? -ne 0 ]; then
+ echo "Redis failed to start, exiting."
+ exit 3
+ fi
+ echo 127.0.0.1:$PORT >> /nodes/nodemap
+done
+echo yes | redis-cli --cluster create $(seq -f 127.0.0.1:%g 16391 16396) --cluster-replicas 1
+tail -f /redis.log
\ No newline at end of file
diff --git a/docker/base/create_cluster5.sh b/docker/base/create_cluster5.sh
new file mode 100755
index 0000000000..0c63d8e910
--- /dev/null
+++ b/docker/base/create_cluster5.sh
@@ -0,0 +1,26 @@
+#! /bin/bash
+mkdir -p /nodes
+touch /nodes/nodemap
+for PORT in $(seq 16385 16390); do
+ mkdir -p /nodes/$PORT
+ if [[ -e /redis.conf ]]; then
+ cp /redis.conf /nodes/$PORT/redis.conf
+ else
+ touch /nodes/$PORT/redis.conf
+ fi
+ cat << EOF >> /nodes/$PORT/redis.conf
+port ${PORT}
+cluster-enabled yes
+daemonize yes
+logfile /redis.log
+dir /nodes/$PORT
+EOF
+ redis-server /nodes/$PORT/redis.conf
+ if [ $? -ne 0 ]; then
+ echo "Redis failed to start, exiting."
+ exit 3
+ fi
+ echo 127.0.0.1:$PORT >> /nodes/nodemap
+done
+echo yes | redis-cli --cluster create $(seq -f 127.0.0.1:%g 16385 16390) --cluster-replicas 1
+tail -f /redis.log
\ No newline at end of file
diff --git a/docker/base/create_redismod_cluster.sh b/docker/base/create_redismod_cluster.sh
new file mode 100755
index 0000000000..20443a4c42
--- /dev/null
+++ b/docker/base/create_redismod_cluster.sh
@@ -0,0 +1,46 @@
+#! /bin/bash
+
+mkdir -p /nodes
+touch /nodes/nodemap
+if [ -z "${START_PORT}" ]; then
+    START_PORT=46379
+fi
+if [ -z "${END_PORT}" ]; then
+    END_PORT=46384
+fi
+if [ ! -z "$3" ]; then
+    START_PORT=$2
+    END_PORT=$3
+fi
+echo "STARTING: ${START_PORT}"
+echo "ENDING: ${END_PORT}"
+
+for PORT in `seq ${START_PORT} ${END_PORT}`; do
+ mkdir -p /nodes/$PORT
+ if [[ -e /redis.conf ]]; then
+ cp /redis.conf /nodes/$PORT/redis.conf
+ else
+ touch /nodes/$PORT/redis.conf
+ fi
+ cat << EOF >> /nodes/$PORT/redis.conf
+port ${PORT}
+cluster-enabled yes
+daemonize yes
+logfile /redis.log
+dir /nodes/$PORT
+EOF
+
+ set -x
+ redis-server /nodes/$PORT/redis.conf
+ if [ $? -ne 0 ]; then
+ echo "Redis failed to start, exiting."
+ continue
+ fi
+ echo 127.0.0.1:$PORT >> /nodes/nodemap
+done
+if [ -z "${REDIS_PASSWORD}" ]; then
+ echo yes | redis-cli --cluster create `seq -f 127.0.0.1:%g ${START_PORT} ${END_PORT}` --cluster-replicas 1
+else
+ echo yes | redis-cli -a ${REDIS_PASSWORD} --cluster create `seq -f 127.0.0.1:%g ${START_PORT} ${END_PORT}` --cluster-replicas 1
+fi
+tail -f /redis.log
diff --git a/docker/cluster/redis.conf b/docker/cluster/redis.conf
new file mode 100644
index 0000000000..dff658c79b
--- /dev/null
+++ b/docker/cluster/redis.conf
@@ -0,0 +1,3 @@
+# Redis Cluster config file will be shared across all nodes.
+# Do not change the following configurations that are already set:
+# port, cluster-enabled, daemonize, logfile, dir
diff --git a/docker/redis4/master/redis.conf b/docker/redis4/master/redis.conf
new file mode 100644
index 0000000000..b7ed0ebf00
--- /dev/null
+++ b/docker/redis4/master/redis.conf
@@ -0,0 +1,2 @@
+port 6381
+save ""
diff --git a/docker/redis4/sentinel/sentinel_1.conf b/docker/redis4/sentinel/sentinel_1.conf
new file mode 100644
index 0000000000..cfee17c051
--- /dev/null
+++ b/docker/redis4/sentinel/sentinel_1.conf
@@ -0,0 +1,6 @@
+port 26385
+
+sentinel monitor redis-py-test 127.0.0.1 6381 2
+sentinel down-after-milliseconds redis-py-test 5000
+sentinel failover-timeout redis-py-test 60000
+sentinel parallel-syncs redis-py-test 1
diff --git a/docker/redis4/sentinel/sentinel_2.conf b/docker/redis4/sentinel/sentinel_2.conf
new file mode 100644
index 0000000000..68d930aea8
--- /dev/null
+++ b/docker/redis4/sentinel/sentinel_2.conf
@@ -0,0 +1,6 @@
+port 26386
+
+sentinel monitor redis-py-test 127.0.0.1 6381 2
+sentinel down-after-milliseconds redis-py-test 5000
+sentinel failover-timeout redis-py-test 60000
+sentinel parallel-syncs redis-py-test 1
\ No newline at end of file
diff --git a/docker/redis4/sentinel/sentinel_3.conf b/docker/redis4/sentinel/sentinel_3.conf
new file mode 100644
index 0000000000..60abf65c9b
--- /dev/null
+++ b/docker/redis4/sentinel/sentinel_3.conf
@@ -0,0 +1,6 @@
+port 26387
+
+sentinel monitor redis-py-test 127.0.0.1 6381 2
+sentinel down-after-milliseconds redis-py-test 5000
+sentinel failover-timeout redis-py-test 60000
+sentinel parallel-syncs redis-py-test 1
\ No newline at end of file
diff --git a/docker/redis5/master/redis.conf b/docker/redis5/master/redis.conf
new file mode 100644
index 0000000000..e479c48b28
--- /dev/null
+++ b/docker/redis5/master/redis.conf
@@ -0,0 +1,2 @@
+port 6382
+save ""
diff --git a/docker/redis5/replica/redis.conf b/docker/redis5/replica/redis.conf
new file mode 100644
index 0000000000..a2dc9e0945
--- /dev/null
+++ b/docker/redis5/replica/redis.conf
@@ -0,0 +1,3 @@
+port 6383
+save ""
+replicaof master 6382
diff --git a/docker/redis5/sentinel/sentinel_1.conf b/docker/redis5/sentinel/sentinel_1.conf
new file mode 100644
index 0000000000..c748a0ba72
--- /dev/null
+++ b/docker/redis5/sentinel/sentinel_1.conf
@@ -0,0 +1,6 @@
+port 26382
+
+sentinel monitor redis-py-test 127.0.0.1 6382 2
+sentinel down-after-milliseconds redis-py-test 5000
+sentinel failover-timeout redis-py-test 60000
+sentinel parallel-syncs redis-py-test 1
diff --git a/docker/redis5/sentinel/sentinel_2.conf b/docker/redis5/sentinel/sentinel_2.conf
new file mode 100644
index 0000000000..0a50c9a623
--- /dev/null
+++ b/docker/redis5/sentinel/sentinel_2.conf
@@ -0,0 +1,6 @@
+port 26383
+
+sentinel monitor redis-py-test 127.0.0.1 6382 2
+sentinel down-after-milliseconds redis-py-test 5000
+sentinel failover-timeout redis-py-test 60000
+sentinel parallel-syncs redis-py-test 1
\ No newline at end of file
diff --git a/docker/redis5/sentinel/sentinel_3.conf b/docker/redis5/sentinel/sentinel_3.conf
new file mode 100644
index 0000000000..a0e350ba0f
--- /dev/null
+++ b/docker/redis5/sentinel/sentinel_3.conf
@@ -0,0 +1,6 @@
+port 26384
+
+sentinel monitor redis-py-test 127.0.0.1 6383 2
+sentinel down-after-milliseconds redis-py-test 5000
+sentinel failover-timeout redis-py-test 60000
+sentinel parallel-syncs redis-py-test 1
\ No newline at end of file
diff --git a/docker/redis6.2/master/redis.conf b/docker/redis6.2/master/redis.conf
new file mode 100644
index 0000000000..15a31b5a38
--- /dev/null
+++ b/docker/redis6.2/master/redis.conf
@@ -0,0 +1,2 @@
+port 6379
+save ""
diff --git a/docker/redis6.2/replica/redis.conf b/docker/redis6.2/replica/redis.conf
new file mode 100644
index 0000000000..a76d402c5e
--- /dev/null
+++ b/docker/redis6.2/replica/redis.conf
@@ -0,0 +1,3 @@
+port 6380
+save ""
+replicaof master 6379
diff --git a/docker/redis6.2/sentinel/sentinel_1.conf b/docker/redis6.2/sentinel/sentinel_1.conf
new file mode 100644
index 0000000000..bd2d830af3
--- /dev/null
+++ b/docker/redis6.2/sentinel/sentinel_1.conf
@@ -0,0 +1,6 @@
+port 26379
+
+sentinel monitor redis-py-test 127.0.0.1 6379 2
+sentinel down-after-milliseconds redis-py-test 5000
+sentinel failover-timeout redis-py-test 60000
+sentinel parallel-syncs redis-py-test 1
diff --git a/docker/redis6.2/sentinel/sentinel_2.conf b/docker/redis6.2/sentinel/sentinel_2.conf
new file mode 100644
index 0000000000..955621b872
--- /dev/null
+++ b/docker/redis6.2/sentinel/sentinel_2.conf
@@ -0,0 +1,6 @@
+port 26380
+
+sentinel monitor redis-py-test 127.0.0.1 6379 2
+sentinel down-after-milliseconds redis-py-test 5000
+sentinel failover-timeout redis-py-test 60000
+sentinel parallel-syncs redis-py-test 1
diff --git a/docker/redis6.2/sentinel/sentinel_3.conf b/docker/redis6.2/sentinel/sentinel_3.conf
new file mode 100644
index 0000000000..62c40512f1
--- /dev/null
+++ b/docker/redis6.2/sentinel/sentinel_3.conf
@@ -0,0 +1,6 @@
+port 26381
+
+sentinel monitor redis-py-test 127.0.0.1 6379 2
+sentinel down-after-milliseconds redis-py-test 5000
+sentinel failover-timeout redis-py-test 60000
+sentinel parallel-syncs redis-py-test 1
diff --git a/docker/redis7/master/redis.conf b/docker/redis7/master/redis.conf
new file mode 100644
index 0000000000..ef57c1fe99
--- /dev/null
+++ b/docker/redis7/master/redis.conf
@@ -0,0 +1,4 @@
+port 6379
+save ""
+enable-debug-command yes
+enable-module-command yes
\ No newline at end of file
diff --git a/docker/redismod_cluster/redis.conf b/docker/redismod_cluster/redis.conf
new file mode 100644
index 0000000000..48f06668a8
--- /dev/null
+++ b/docker/redismod_cluster/redis.conf
@@ -0,0 +1,8 @@
+loadmodule /usr/lib/redis/modules/redisai.so
+loadmodule /usr/lib/redis/modules/redisearch.so
+loadmodule /usr/lib/redis/modules/redisgraph.so
+loadmodule /usr/lib/redis/modules/redistimeseries.so
+loadmodule /usr/lib/redis/modules/rejson.so
+loadmodule /usr/lib/redis/modules/redisbloom.so
+loadmodule /var/opt/redislabs/lib/modules/redisgears.so Plugin /var/opt/redislabs/modules/rg/plugin/gears_python.so Plugin /var/opt/redislabs/modules/rg/plugin/gears_jvm.so JvmOptions -Djava.class.path=/var/opt/redislabs/modules/rg/gear_runtime-jar-with-dependencies.jar JvmPath /var/opt/redislabs/modules/rg/OpenJDK/jdk-11.0.9.1+1/
+
diff --git a/docker/stunnel/README b/docker/stunnel/README
new file mode 100644
index 0000000000..e92ae78981
--- /dev/null
+++ b/docker/stunnel/README
@@ -0,0 +1 @@
+This directory contains a helper script to create SSL certificates for the SSL tests. If the certificates are out of date, re-run create_certs.sh and check them in. These are snake oil certificates.
diff --git a/docker/stunnel/conf/redis.conf b/docker/stunnel/conf/redis.conf
new file mode 100644
index 0000000000..84f6d40133
--- /dev/null
+++ b/docker/stunnel/conf/redis.conf
@@ -0,0 +1,6 @@
+[redis]
+accept = 6666
+connect = master:6379
+cert = /etc/stunnel/keys/server-cert.pem
+key = /etc/stunnel/keys/server-key.pem
+verify = 0
diff --git a/docker/stunnel/create_certs.sh b/docker/stunnel/create_certs.sh
new file mode 100755
index 0000000000..f3bcea6f5d
--- /dev/null
+++ b/docker/stunnel/create_certs.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+set -e
+
+DESTDIR=`dirname "$0"`/keys
+test -d ${DESTDIR} || mkdir ${DESTDIR}
+cd ${DESTDIR}
+
+SSL_SUBJECT="/C=CA/ST=Winnipeg/L=Manitoba/O=Some Corp/OU=IT Department/CN=example.com"
+which openssl &>/dev/null
+if [ $? -ne 0 ]; then
+ echo "No openssl binary present, exiting."
+ exit 1
+fi
+
+openssl genrsa -out ca-key.pem 2048 &>/dev/null
+
+openssl req -new -x509 -nodes -days 365000 \
+ -key ca-key.pem \
+ -out ca-cert.pem \
+ -subj "${SSL_SUBJECT}" &>/dev/null
+
+openssl req -newkey rsa:2048 -nodes -days 365000 \
+ -keyout server-key.pem \
+ -out server-req.pem \
+ -subj "${SSL_SUBJECT}" &>/dev/null
+
+openssl x509 -req -days 365000 -set_serial 01 \
+ -in server-req.pem \
+ -out server-cert.pem \
+ -CA ca-cert.pem \
+ -CAkey ca-key.pem &>/dev/null
+
+openssl req -newkey rsa:2048 -nodes -days 365000 \
+ -keyout client-key.pem \
+ -out client-req.pem \
+ -subj "${SSL_SUBJECT}" &>/dev/null
+
+openssl x509 -req -days 365000 -set_serial 01 \
+ -in client-req.pem \
+ -out client-cert.pem \
+ -CA ca-cert.pem \
+ -CAkey ca-key.pem &>/dev/null
+
+echo "Keys generated in ${DESTDIR}:"
+ls
diff --git a/docker/stunnel/keys/ca-cert.pem b/docker/stunnel/keys/ca-cert.pem
new file mode 100644
index 0000000000..460354d9ad
--- /dev/null
+++ b/docker/stunnel/keys/ca-cert.pem
@@ -0,0 +1,21 @@
+-----BEGIN CERTIFICATE-----
+MIIDaDCCAlACCQCui7X/vxmwGjANBgkqhkiG9w0BAQsFADB1MQswCQYDVQQGEwJD
+QTERMA8GA1UECAwIV2lubmlwZWcxETAPBgNVBAcMCE1hbml0b2JhMRIwEAYDVQQK
+DAlTb21lIENvcnAxFjAUBgNVBAsMDUlUIERlcGFydG1lbnQxFDASBgNVBAMMC2V4
+YW1wbGUuY29tMCAXDTIyMDExMjE0NTQyMVoYDzMwMjEwNTE1MTQ1NDIxWjB1MQsw
+CQYDVQQGEwJDQTERMA8GA1UECAwIV2lubmlwZWcxETAPBgNVBAcMCE1hbml0b2Jh
+MRIwEAYDVQQKDAlTb21lIENvcnAxFjAUBgNVBAsMDUlUIERlcGFydG1lbnQxFDAS
+BgNVBAMMC2V4YW1wbGUuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEAtSqof5fXyN/Y6GSLBBNA/zhrqw2qcBW4va6+Wc24WTaBXcP0w13njz+j1b5V
+9rbpz0i7WUkg3bBPecFFuCFyQnvn2JaE9b7kX1lLmszanrYfWQ9bYQyecox3HuYq
+eu330S+bD0liYh5rV7oEanuSCJW+a/dgEl3l/+Qb0zo2ZNEAXRuBv6lNmvBSsdIt
+lc5n/P06ntJ6Ia/7rO0ZEiBb6hLFKfiIo/XvDrGNlYulJEcDmC3PkzzJRGnA7R2F
+7Vggj4l4pGE/3EtnA4C/rd0Shf9TIPQFA2HOx3oYsrOonuBYM2urciNeojP5XGY/
+Zdau7hzgFBgF8tWsLU6bKyZ3NwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQBJwnf3
+FARRxQF1Q2jIXQdyUS/lqq74C+PZF5IKOKb2K3dT1BFJlBgduvj4Ih5KUakImhMB
+SdaiwKzgB9APXNVAgrzSCb49PzXzvmaIFhPmBXSITFFfGupxpo0ZStwI03B0KZBs
+l3Zd0SzjKqZNVtTnxyDyWnYNFJtuCGanTjyPcCAFvVwzDQyzZ14liyM389WM950a
+ANM7H0iv6U/h7lWhnvBOlRfj89JChBvEROlWuYfyyELZpAXsmuwWdh0pwgGpqMI/
+EtLas2sbX5apE8P1S2Uxc+dS4IjoA/TrnP21rXwJ8AWzrntsZalSx9uueb1qhPp8
+EL7asG4+G3BpQrL1
+-----END CERTIFICATE-----
diff --git a/docker/stunnel/keys/ca-key.pem b/docker/stunnel/keys/ca-key.pem
new file mode 100644
index 0000000000..64db528c48
--- /dev/null
+++ b/docker/stunnel/keys/ca-key.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEogIBAAKCAQEAtSqof5fXyN/Y6GSLBBNA/zhrqw2qcBW4va6+Wc24WTaBXcP0
+w13njz+j1b5V9rbpz0i7WUkg3bBPecFFuCFyQnvn2JaE9b7kX1lLmszanrYfWQ9b
+YQyecox3HuYqeu330S+bD0liYh5rV7oEanuSCJW+a/dgEl3l/+Qb0zo2ZNEAXRuB
+v6lNmvBSsdItlc5n/P06ntJ6Ia/7rO0ZEiBb6hLFKfiIo/XvDrGNlYulJEcDmC3P
+kzzJRGnA7R2F7Vggj4l4pGE/3EtnA4C/rd0Shf9TIPQFA2HOx3oYsrOonuBYM2ur
+ciNeojP5XGY/Zdau7hzgFBgF8tWsLU6bKyZ3NwIDAQABAoIBACq8mWsgAsNcKusH
+bNPVRuvt/1gmrSIrvZzhb/33TZmeBf58j2zW5h0gwiFV+SluFNHVMnzph1tEkDsE
+oNHC8hVE7XhmaY8fLPhhNDicQqZWCCcWPFQ0idwzzpX3beX55Q/vzwBYK2FCE8hq
+FUiZReXIjVci0AMFK5Cl2vqFLPezAGvaZ4/M1reOF3vCgWl8IXTwYOs4EYd1CJt7
+bMwO9Q6P8V0BVhJO2tdwIe5XL5X086sMMPYXqMuwX9m3vZFQFpsZobmoAyYLVY+h
+IMoQZdh4O4sFYPQBPzhZXluFDl8rX6G5A9jUPxDfeVz+799RXi31jTYeH01OwM89
+/0BNryECgYEA15hU0qDAnM7fBiTTGbRUT/QPOmEUOPcnWfLWOyJsovAVLL1X0jmt
+GFm+FkTtOlcTVgDHXeHNw81zrgDDuW7fwaKloPeyWhyO6rp2jntAz/OayfA5UYOf
+REhXdQH7rMAkGgy1t7zKGHTYAslHjD2dOikCuHH/13otSJS4wNvTaZUCgYEA1x6L
+abxYDpR7jn2Yym0CbIiZ6tqShtqLi4eNF7PDVe3rUM7gYU767UFSKPvRpsq+BFwf
+LLRFgpggNRDrZWoK0ZekHD1x8pCJF+O4pj/Fhra4uI+hInycRQ4xsj9VU/WftxQ4
+aOojB28F0fBO56T90caQVSR09DGNmElSQFcw4psCgYApf8n8DTNmO6/UV+xGi16b
+UUhJHXyuBm0NtF+mXFb6+impRf0Mm0uFX2jmknfzfeVb7aRyns9jvD1jJgSGwh/R
+/wPQuz0aeVrNNf0yKels3eBStKnj1eknVKF5BVuzgfyxAvdLmcxw7rTRvHrINOf5
+1QEQDemISZ1D1lTF0sqcDQKBgCmE6rGAuZouzF4nHZtMSOB7yQFMKGXAvpgylGfT
+uUrXfch99U6yuLmcFuh0GfXQQbaDtTyimpvnEqhLWLOdMPNdCj6tGVYQ0XT77cKg
+olYq5CIzDo2icWLep3bYxHZM/QOP8odFUXd41S287O3GqXqYkXjtbWlIOyT+WdKz
+QWsrAoGALnac4Vh2s12Cv3YiQbkPtBRe8oxI0h6DEIdBciPDGq6WXq6O2PXXuBhM
+X47mObUsSuzI6hI4/vd4/tXD7TM3fS1YDdZXj7d51ZjT/jmlTVxAHa3DJ8i7o+rH
+Fqv/lh6MB6FGkXZ9vAGQe5RwUbDD16QO/1mz7fg0YBA9A8plM8s=
+-----END RSA PRIVATE KEY-----
diff --git a/docker/stunnel/keys/client-cert.pem b/docker/stunnel/keys/client-cert.pem
new file mode 100644
index 0000000000..5c48eb8b3d
--- /dev/null
+++ b/docker/stunnel/keys/client-cert.pem
@@ -0,0 +1,21 @@
+-----BEGIN CERTIFICATE-----
+MIIDYDCCAkgCAQEwDQYJKoZIhvcNAQEFBQAwdTELMAkGA1UEBhMCQ0ExETAPBgNV
+BAgMCFdpbm5pcGVnMREwDwYDVQQHDAhNYW5pdG9iYTESMBAGA1UECgwJU29tZSBD
+b3JwMRYwFAYDVQQLDA1JVCBEZXBhcnRtZW50MRQwEgYDVQQDDAtleGFtcGxlLmNv
+bTAgFw0yMjAxMTIxNDU0MjFaGA8zMDIxMDUxNTE0NTQyMVowdTELMAkGA1UEBhMC
+Q0ExETAPBgNVBAgMCFdpbm5pcGVnMREwDwYDVQQHDAhNYW5pdG9iYTESMBAGA1UE
+CgwJU29tZSBDb3JwMRYwFAYDVQQLDA1JVCBEZXBhcnRtZW50MRQwEgYDVQQDDAtl
+eGFtcGxlLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALwWezv2
+WHf6fCyiLYHmi3+Qf/33VmdNAapWzpOZ0Xmuzf8SSoEep/YotvnmIBe8DqspjzBW
+eeg+n7qre+qawGv1AOANlStLKeNvnXhWS0bdoAKMP68Q8jvU+YSmJNZTRkg/39MA
+YNqxYABYamoIQ7qX+g91HsCxPSzqIyjLwY4hPHGYfxGhRH5ne2RtsYEcMjOJWs8s
+U4x6wpwn9Y4vnG1AqpcwY4xm65g/52BWWM9WfZ++y17MynSdoE29EqXCAGqhh1i1
+IRlKN1vr/792VYzOm2fHScaaCaCmhDIlTw0TlOgnfi7CFtY0z6uizSwG4RWCW+3/
+g47T3q8aCnvlkCkCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAGuNzbKtvKsdfQaAV
+SmeNAZqyoG2Fbmh/txj6j+UThu3tadk07/SukgsM6yepeq05+wguW43eBtig/LzH
+pSHCn4s/w0fvu1GqePWsTdGI3xnJakZIlkOXPStIgZJNewT9rD6WoRfthvTOda8v
+NBjW0InACnVvzAivX9xhbUB4K/I8aEGaAZwzIGnQbsxygPVZKe/Y8oWhiks0qYo2
+Wev1Swli4EeqbYvg+3TMy7T1pDkjAmAdsv7yJAYKsM3xCu7K8vA/e+2J2hjUQIfI
+Thdjb6FNywihVaAK2BUqL6cMgF8I+nX7ywVOBAz+a3F00sSogapztinzqsjFDeT9
+5V/MSg==
+-----END CERTIFICATE-----
diff --git a/docker/stunnel/keys/client-key.pem b/docker/stunnel/keys/client-key.pem
new file mode 100644
index 0000000000..4117706d0e
--- /dev/null
+++ b/docker/stunnel/keys/client-key.pem
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC8Fns79lh3+nws
+oi2B5ot/kH/991ZnTQGqVs6TmdF5rs3/EkqBHqf2KLb55iAXvA6rKY8wVnnoPp+6
+q3vqmsBr9QDgDZUrSynjb514VktG3aACjD+vEPI71PmEpiTWU0ZIP9/TAGDasWAA
+WGpqCEO6l/oPdR7AsT0s6iMoy8GOITxxmH8RoUR+Z3tkbbGBHDIziVrPLFOMesKc
+J/WOL5xtQKqXMGOMZuuYP+dgVljPVn2fvstezMp0naBNvRKlwgBqoYdYtSEZSjdb
+6/+/dlWMzptnx0nGmgmgpoQyJU8NE5ToJ34uwhbWNM+ros0sBuEVglvt/4OO096v
+Ggp75ZApAgMBAAECggEBAJDXLydJ2W7rMdydNzYld59Qg3/rjFoYbwPhvUrk1O9D
+sdaPG1i7ZtSlHeLrWCNu6kzcwCuVLGOwdgimLdLIQQ3hqj7fttOUGjnOphEZQvbb
+jHDp19DU1/VDWLLRzuRNVH4m0hIG5I8EsM0TST9GBgIXLrXgl0IEOvvvggvUfMUZ
+eGrrVsW56XIc25LZCalf20lcoyKa2hVjtlF2ds41PY6WqytkRJ7zpnBzO4g+Kz3D
+iA2rzNn/Ds2CCvuNDA8UF6qG/INbcySaq+qbSYLohWSsz9smIhkWUyF4YfbtziZr
+8AbxZKbS8VopSFxF+o35CbEZeTPkFkrBfbD0xUlCeEECgYEA6h1hLodTeQUpQoc3
+6brWvw1gM/tM0RyKbpOEwJGK1MnX99IM5z6qGY+d1htl7cB3RARpaY1HAvRXHhXt
+9qaSdhqR1hagZLn2vbelFkbJ0N1agdR6XYgGoxfH2RCluNfZZPOB6urfCLNbMjgb
+B1rkvIWiELCzujwsZ6m5sOomP70CgYEAzauggpcqEXQ4P4+y6B/8gOt7chuRczft
+1YTj2Y5tfZSTZmh01BUgenDgA1+NFJ9ni33P6+Ij/1D0ZGdea5Lqw2VP1ZDEIYSm
+j3ekkge/0AljZgIil2UviBhx5W2BlwnlukIwMvzVRwDulQsV3sDxprZKHYTaRcnC
+EB4Y9T6uUt0CgYBjeCojP8IaiDPYnWUHPKgjMoaub1Za/ppekvTzcKMg98V3+Noc
+okZZZ+iy4J81HfJOhuVWwHzsZ25gTQb3JhzSa0WNRb3OLikEwHM2/MqgoHvk76cx
++CqBvwfdVTJkT+mA9+k6K6KpqrLTqnzpahgHdWu/VaR3OzvOq5FG9qVbrQKBgF5F
+xRUW5RmLBB1eaMstnjgZuEPdjxYZFNNCTo5yUo21hLr0NljgNjrpckUZjzlct8Gg
+saWVyppFKUC8gPMeLK3TynxCFySmARLR7IVjN/DL3NvtLp3mq5reWZaoUzZAOyTd
+Ieq9KaWaL8HxitzH4/xeoipVsxc6G9H3eckwKgehAoGBAM/E0qLpEXOaLxODY8tt
++qpoNWHZn1M6cVX+tA/6igKfqUY96lefLmEiV1N01qW7+keFMXT12X/edsykG8jd
+gcNkNjSNwDSi8ixl0YlQwRJjX93TEip78sisQ3mCUqZUCNbm0Dm66Bqe8rAD5AdF
+G4oVbUu1gN0StX85Uw8J0AYS
+-----END PRIVATE KEY-----
diff --git a/docker/stunnel/keys/client-req.pem b/docker/stunnel/keys/client-req.pem
new file mode 100644
index 0000000000..ecf83f4daa
--- /dev/null
+++ b/docker/stunnel/keys/client-req.pem
@@ -0,0 +1,17 @@
+-----BEGIN CERTIFICATE REQUEST-----
+MIICujCCAaICAQAwdTELMAkGA1UEBhMCQ0ExETAPBgNVBAgMCFdpbm5pcGVnMREw
+DwYDVQQHDAhNYW5pdG9iYTESMBAGA1UECgwJU29tZSBDb3JwMRYwFAYDVQQLDA1J
+VCBEZXBhcnRtZW50MRQwEgYDVQQDDAtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBALwWezv2WHf6fCyiLYHmi3+Qf/33VmdNAapWzpOZ
+0Xmuzf8SSoEep/YotvnmIBe8DqspjzBWeeg+n7qre+qawGv1AOANlStLKeNvnXhW
+S0bdoAKMP68Q8jvU+YSmJNZTRkg/39MAYNqxYABYamoIQ7qX+g91HsCxPSzqIyjL
+wY4hPHGYfxGhRH5ne2RtsYEcMjOJWs8sU4x6wpwn9Y4vnG1AqpcwY4xm65g/52BW
+WM9WfZ++y17MynSdoE29EqXCAGqhh1i1IRlKN1vr/792VYzOm2fHScaaCaCmhDIl
+Tw0TlOgnfi7CFtY0z6uizSwG4RWCW+3/g47T3q8aCnvlkCkCAwEAAaAAMA0GCSqG
+SIb3DQEBCwUAA4IBAQAqLgfkWWIE1RV1TENnr9jT+SK8u3F2nX4mUzNmy8azq52I
+fO8qPKmvV2amt5y961jNpR+rRpARncONuf6NQR5qCMu/EKjVi9BhOkoIOK0RjgtK
+AkCTON1J8022JDQpN5/H5ZpLDkIlBtpwDvEaR/PnTaJxtGwLY8HxY6h20PDjP3J9
+Xu3w3m/s3uVjFG07RDvbwK02vYskePnlsKVw+uu5C2blOQRlRVvdCCkwN0y6IiWW
+uRGRSzwufgejrfDUJG4VZuNpvWjFfzjHW105g1AxaTW3anRqBSNxYF+iawfbGdf4
+bGT4Wazbwq5uU3uixxOzxPMI5ZP/gn0ywz9S1RRK
+-----END CERTIFICATE REQUEST-----
diff --git a/docker/stunnel/keys/server-cert.pem b/docker/stunnel/keys/server-cert.pem
new file mode 100644
index 0000000000..3a1bf72011
--- /dev/null
+++ b/docker/stunnel/keys/server-cert.pem
@@ -0,0 +1,21 @@
+-----BEGIN CERTIFICATE-----
+MIIDYDCCAkgCAQEwDQYJKoZIhvcNAQEFBQAwdTELMAkGA1UEBhMCQ0ExETAPBgNV
+BAgMCFdpbm5pcGVnMREwDwYDVQQHDAhNYW5pdG9iYTESMBAGA1UECgwJU29tZSBD
+b3JwMRYwFAYDVQQLDA1JVCBEZXBhcnRtZW50MRQwEgYDVQQDDAtleGFtcGxlLmNv
+bTAgFw0yMjAxMTIxNDU0MjFaGA8zMDIxMDUxNTE0NTQyMVowdTELMAkGA1UEBhMC
+Q0ExETAPBgNVBAgMCFdpbm5pcGVnMREwDwYDVQQHDAhNYW5pdG9iYTESMBAGA1UE
+CgwJU29tZSBDb3JwMRYwFAYDVQQLDA1JVCBEZXBhcnRtZW50MRQwEgYDVQQDDAtl
+eGFtcGxlLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMxZETTb
+dxqFsNjUIJbpS6ZT9RkH/dWYTVk1uRUMh6Cr6920g/7pSaRLIx8guTDHa1jhPIlX
+lax7oZyX9coLjhSc6cy0ZmoH0zrp8ZbRc/qOawuO62arKP89pO/18MB3r9zPb1PJ
+evTP203+2a8ly25cscMTUge+rHMFAUW+/01hc90CY9ial9oCl9wtoPdPGA8XlX3u
+RswOAM79fM+Szvv+bX0VvFakkfHIE8oIK5/rJYDswBKAshw5CjW/OEjD6FbCb84c
+1E7jJhwwd6X70yDMOrJ8iVkA/lpzfoosiuYm/okgbPPXWEo8aa//MrSH90l2+M9q
+Vvn8hbmwlJl+2IMCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAEcTps0CUnBZJBH/w
+8oJo8kAvsHhFTLJpTtiztut5qI+FMgC6sPcVUKi95gie2pdJ91y6sFzqLpghAciR
+ocYBy/jxK0M7OGJHLpUPeCS1yxeEyeZXpMPS90bUo1tPh7QDAojoRrFYo6M6DbL3
+dcErTJlvKnBBT9/DmENx75R+1nSB86vq0je+X0IqbZXeJyWju6ybjbwo1NPpnu+f
+jnXTG0+ZIsepms0VTXwcTy3dthIE+uw4XqTQ1qYg2stQAOUJ0nmb68NExi5zom5G
+0nh7tZnL0N+Z+XeNo7gaVatxfmgyk/HO2Vl4Wk4NA0PkR0yk2vNUwS0rKAb2mYc6
+T2gHdQ==
+-----END CERTIFICATE-----
diff --git a/docker/stunnel/keys/server-key.pem b/docker/stunnel/keys/server-key.pem
new file mode 100644
index 0000000000..62595e017c
--- /dev/null
+++ b/docker/stunnel/keys/server-key.pem
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDMWRE023cahbDY
+1CCW6UumU/UZB/3VmE1ZNbkVDIegq+vdtIP+6UmkSyMfILkwx2tY4TyJV5Wse6Gc
+l/XKC44UnOnMtGZqB9M66fGW0XP6jmsLjutmqyj/PaTv9fDAd6/cz29TyXr0z9tN
+/tmvJctuXLHDE1IHvqxzBQFFvv9NYXPdAmPYmpfaApfcLaD3TxgPF5V97kbMDgDO
+/XzPks77/m19FbxWpJHxyBPKCCuf6yWA7MASgLIcOQo1vzhIw+hWwm/OHNRO4yYc
+MHel+9MgzDqyfIlZAP5ac36KLIrmJv6JIGzz11hKPGmv/zK0h/dJdvjPalb5/IW5
+sJSZftiDAgMBAAECggEAct5+daAIy7frOXfE+hAanl0DohaD8dWzZTp12Ac7Fm6O
+IAqhSGILK3exPoY+k9UF2uiCBxJc6KB2sHgbioAEVkH+izu9dkz/yFZJn+YNtALq
+2Yx1dzkvyor0dI9jzk15Zj6U7hyMKaHOPYHNDE/Kkzc4Fdh+fCwK9H0TwgkjqnLj
+hfRK32+SqaftkhZnCxaFfdVVzhonWsaB7VcyUPdIHAMG0xUQ9oNTM0WLPotU/uh0
+XDCemwXhkqfKaAlnj0YBsu65WOTTiPixOPigDe745CHFBXwvCF28kxjSbCAVlHIv
+JcTtq1EA+fNHRTeHgNGSpqOdfuVrBMyp3KiztLBfQQKBgQD47MFmQphXVQWRmKoU
+gCFf28notV8J0VGyG7E0tFMS3GgyAAl8H8I6fB9UYOmD95PrHTROxKpc7jYtZRW3
+KcYJP5zKa+DqSSks8I5dLwFkKYVC0GiEJWuRwS9aHaD7ja65NtXJO+2iZ598s39w
+iSx0OAvaf9cFUrsAmHAE84c+/QKBgQDSJ/VE1CS0Tv2kL5Wbr/RmgYBZbXHnRz6j
+LFA7JwX3seHtuo+WBe8BMOMS4YqW6K2YTqwU8NtN1oATWg72TcLhwJZ3sKGPiMhM
+/cHW0dJqYsXujIOd/dlSr+j9Mouoxm6Spl+hGpj2IPUV9Dlm8N4SqPk83m0O+8Hy
+P088HK7NfwKBgQC3D0XbMjZeY0RJIoBRuzjQCg6eeGOAENOHrB3RqJs/T5/AxY40
+Hhb0c7uGjg6s4jGBwmRpWPAAj56AG8qwfKQKwSFJK7SoF02UowPPO3ZGdtJtpF54
+cBx/gBaWqxtsY3GO++iUqOHFgXckeczKsdZjUaRF96XlYEXt1izrNzzK8QKBgQCP
+OsCE6nkhknx3/B5g/2j4u+Y4DMmGsR3VpAwCZLRCfq/WkEHwI5cjHqiEY8dK1sYJ
+egT6OLWetUSQ694qrBDYP6PNa0qRQs4Q+xmzSUm5TBxOWuIROcN2AYIvntVkb+lI
+da/TYwdBKHEhR1Qf/qW73gIQJB/8CEXEzrU36OySDQKBgQD35khRdiU+1bPt/DpW
++8A+88BuxXMFxKYtEoMuTJnb7enarwp7+FtY6WhNgOgxELTpRbYw9496mOmNbJKL
+PmTXzs3aS5bv/2JTtc5+CHzf9PJ+jAYWnh9hCq9x/mA0QRMQAZEi8vhhYFaWiiV3
+wUYnDFnnAKia1VILt9jZ7I4T7Q==
+-----END PRIVATE KEY-----
diff --git a/docker/stunnel/keys/server-req.pem b/docker/stunnel/keys/server-req.pem
new file mode 100644
index 0000000000..361891d1c8
--- /dev/null
+++ b/docker/stunnel/keys/server-req.pem
@@ -0,0 +1,17 @@
+-----BEGIN CERTIFICATE REQUEST-----
+MIICujCCAaICAQAwdTELMAkGA1UEBhMCQ0ExETAPBgNVBAgMCFdpbm5pcGVnMREw
+DwYDVQQHDAhNYW5pdG9iYTESMBAGA1UECgwJU29tZSBDb3JwMRYwFAYDVQQLDA1J
+VCBEZXBhcnRtZW50MRQwEgYDVQQDDAtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBAMxZETTbdxqFsNjUIJbpS6ZT9RkH/dWYTVk1uRUM
+h6Cr6920g/7pSaRLIx8guTDHa1jhPIlXlax7oZyX9coLjhSc6cy0ZmoH0zrp8ZbR
+c/qOawuO62arKP89pO/18MB3r9zPb1PJevTP203+2a8ly25cscMTUge+rHMFAUW+
+/01hc90CY9ial9oCl9wtoPdPGA8XlX3uRswOAM79fM+Szvv+bX0VvFakkfHIE8oI
+K5/rJYDswBKAshw5CjW/OEjD6FbCb84c1E7jJhwwd6X70yDMOrJ8iVkA/lpzfoos
+iuYm/okgbPPXWEo8aa//MrSH90l2+M9qVvn8hbmwlJl+2IMCAwEAAaAAMA0GCSqG
+SIb3DQEBCwUAA4IBAQCljqLOTU3tFEqxJ2AbZ5HVg9AN/SEUX8c/SyzCBii3r9Dj
+ubp0YWvYvgm7lnXsFAVDznf89RAzwdFur5iAQ95VfWBW6NEjdFQIh51KF6P/Qzjg
+TbctVeX/MTPuKewVhkQg9/sRmegbb+RBKEeCZccLUVuk5DAgFmi0cFP4e50uuNRG
+gwskG9nJp/X5aBd4Y1YKg8XS+WLPwwrYvffoHN8mWHh+YqF16MbxMHM5xRMWu6E7
+801EzEWAW5Y8J2ssp/9FSI+aXOhk68aNlIVNc2R6Rg1IA8zKV4WSWTMUWAud832h
+z9UZH/YkPgipuiflpKBGs5lbElRx3o6lYblhRL8J
+-----END CERTIFICATE REQUEST-----
diff --git a/docker/unstable/redis.conf b/docker/unstable/redis.conf
new file mode 100644
index 0000000000..93a55cf3b3
--- /dev/null
+++ b/docker/unstable/redis.conf
@@ -0,0 +1,3 @@
+port 6378
+protected-mode no
+save ""
diff --git a/docker/unstable_cluster/redis.conf b/docker/unstable_cluster/redis.conf
new file mode 100644
index 0000000000..f307a63757
--- /dev/null
+++ b/docker/unstable_cluster/redis.conf
@@ -0,0 +1,4 @@
+# Redis Cluster config file will be shared across all nodes.
+# Do not change the following configurations that are already set:
+# port, cluster-enabled, daemonize, logfile, dir
+protected-mode no
diff --git a/docs/_static/redis-cube-red-white-rgb.svg b/docs/_static/redis-cube-red-white-rgb.svg
new file mode 100644
index 0000000000..936eb231b9
--- /dev/null
+++ b/docs/_static/redis-cube-red-white-rgb.svg
@@ -0,0 +1,30 @@
+<!-- SVG markup omitted (Redis cube logo, red/white, RGB) -->
\ No newline at end of file
diff --git a/docs/advanced_features.rst b/docs/advanced_features.rst
new file mode 100644
index 0000000000..5fd20c2ba2
--- /dev/null
+++ b/docs/advanced_features.rst
@@ -0,0 +1,436 @@
+Advanced Features
+=================
+
+A note about threading
+----------------------
+
+Redis client instances can safely be shared between threads. Internally,
+connection instances are only retrieved from the connection pool during
+command execution, and returned to the pool directly after. Command
+execution never modifies state on the client instance.
+
+However, there is one caveat: the Redis SELECT command. The SELECT
+command allows you to switch the database currently in use by the
+connection. That database remains selected until another is selected or
+until the connection is closed. This creates an issue in that
+connections could be returned to the pool that are connected to a
+different database.
+
+As a result, redis-py does not implement the SELECT command on client
+instances. If you use multiple Redis databases within the same
+application, you should create a separate client instance (and possibly
+a separate connection pool) for each database.
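+
+For example, a minimal sketch of the per-database approach (the database
+numbers and variable names here are purely illustrative):
+
+.. code:: pycon
+
+    >>> import redis
+    >>> cache = redis.Redis(host='localhost', port=6379, db=0)
+    >>> sessions = redis.Redis(host='localhost', port=6379, db=1)
+    >>> cache.set('greeting', 'hello')
+    True
+    >>> sessions.exists('greeting')  # a different keyspace entirely
+    0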
+
+It is not safe to pass PubSub or Pipeline objects between threads.
+
+Pipelines
+---------
+
+Default pipelines
+~~~~~~~~~~~~~~~~~
+
+Pipelines are a subclass of the base Redis class that provide support
+for buffering multiple commands to the server in a single request. They
+can be used to dramatically increase the performance of groups of
+commands by reducing the number of back-and-forth TCP packets between
+the client and server.
+
+Pipelines are quite simple to use:
+
+.. code:: pycon
+
+ >>> r = redis.Redis(...)
+ >>> r.set('bing', 'baz')
+ >>> # Use the pipeline() method to create a pipeline instance
+ >>> pipe = r.pipeline()
+ >>> # The following SET commands are buffered
+ >>> pipe.set('foo', 'bar')
+ >>> pipe.get('bing')
+ >>> # the EXECUTE call sends all buffered commands to the server, returning
+ >>> # a list of responses, one for each command.
+ >>> pipe.execute()
+ [True, b'baz']
+
+For ease of use, all commands being buffered into the pipeline return
+the pipeline object itself. Therefore calls can be chained like:
+
+.. code:: pycon
+
+ >>> pipe.set('foo', 'bar').sadd('faz', 'baz').incr('auto_number').execute()
+ [True, True, 6]
+
+In addition, pipelines can also ensure the buffered commands are
+executed atomically as a group. This happens by default. If you want to
+disable the atomic nature of a pipeline but still want to buffer
+commands, you can turn off transactions.
+
+.. code:: pycon
+
+ >>> pipe = r.pipeline(transaction=False)
+
+A common issue arises when an atomic transaction depends on values that
+must first be retrieved from Redis. For instance, let's assume that the
+INCR command didn't exist and we need to build an atomic version of
+INCR in Python.
+
+The completely naive implementation could GET the value, increment it in
+Python, and SET the new value back. However, this is not atomic because
+multiple clients could be doing this at the same time, each getting the
+same value from GET.
+
+Enter the WATCH command. WATCH provides the ability to monitor one or
+more keys prior to starting a transaction. If any of those keys change
+prior to the execution of that transaction, the entire transaction will be
+canceled and a WatchError will be raised. To implement our own
+client-side INCR command, we could do something like this:
+
+.. code:: pycon
+
+ >>> with r.pipeline() as pipe:
+ ... while True:
+ ... try:
+ ... # put a WATCH on the key that holds our sequence value
+ ... pipe.watch('OUR-SEQUENCE-KEY')
+ ... # after WATCHing, the pipeline is put into immediate execution
+ ... # mode until we tell it to start buffering commands again.
+ ... # this allows us to get the current value of our sequence
+ ... current_value = pipe.get('OUR-SEQUENCE-KEY')
+ ... next_value = int(current_value) + 1
+ ... # now we can put the pipeline back into buffered mode with MULTI
+ ... pipe.multi()
+ ... pipe.set('OUR-SEQUENCE-KEY', next_value)
+ ... # and finally, execute the pipeline (the set command)
+ ... pipe.execute()
+ ... # if a WatchError wasn't raised during execution, everything
+ ... # we just did happened atomically.
+ ... break
+ ... except WatchError:
+ ... # another client must have changed 'OUR-SEQUENCE-KEY' between
+ ... # the time we started WATCHing it and the pipeline's execution.
+ ... # our best bet is to just retry.
+ ... continue
+
+Note that, because the Pipeline must bind to a single connection for the
+duration of a WATCH, care must be taken to ensure that the connection is
+returned to the connection pool by calling the reset() method. If the
+Pipeline is used as a context manager (as in the example above) reset()
+will be called automatically. Of course you can do this the manual way
+by explicitly calling reset():
+
+.. code:: pycon
+
+ >>> pipe = r.pipeline()
+ >>> while True:
+ ... try:
+ ... pipe.watch('OUR-SEQUENCE-KEY')
+ ... ...
+ ... pipe.execute()
+ ... break
+ ... except WatchError:
+ ... continue
+ ... finally:
+ ... pipe.reset()
+
+A convenience method named "transaction" exists for handling all the
+boilerplate of handling and retrying watch errors. It takes a callable
+that should expect a single parameter, a pipeline object, and any number
+of keys to be WATCHed. Our client-side INCR command above can be written
+like this, which is much easier to read:
+
+.. code:: pycon
+
+ >>> def client_side_incr(pipe):
+ ... current_value = pipe.get('OUR-SEQUENCE-KEY')
+ ... next_value = int(current_value) + 1
+ ... pipe.multi()
+ ... pipe.set('OUR-SEQUENCE-KEY', next_value)
+ >>>
+ >>> r.transaction(client_side_incr, 'OUR-SEQUENCE-KEY')
+ [True]
+
+Be sure to call pipe.multi() in the callable passed to Redis.transaction
+prior to any write commands.
+
+Pipelines in clusters
+~~~~~~~~~~~~~~~~~~~~~
+
+ClusterPipeline is a subclass of RedisCluster that provides support for
+Redis pipelines in cluster mode. When calling the execute() command, all
+the commands are grouped by the node on which they will be executed, and
+are then executed by the respective nodes in parallel. The pipeline
+instance will wait for all the nodes to respond before returning the
+result to the caller. Command responses are returned as a list in the
+same order in which they were sent. Pipelines can be used to
+dramatically increase the throughput of Redis Cluster by significantly
+reducing the number of network round trips between the client and
+the server.
+
+.. code:: pycon
+
+ >>> with rc.pipeline() as pipe:
+ ... pipe.set('foo', 'value1')
+ ... pipe.set('bar', 'value2')
+ ... pipe.get('foo')
+ ... pipe.get('bar')
+ ... print(pipe.execute())
+ [True, True, b'value1', b'value2']
+ ... pipe.set('foo1', 'bar1').get('foo1').execute()
+ [True, b'bar1']
+
+Please note:
+
+- RedisCluster pipelines currently only support key-based commands.
+- The pipeline gets its 'read_from_replicas' value from the cluster's
+  parameter. Thus, if reading from replicas is enabled in the cluster
+  instance, the pipeline will also direct read commands to replicas.
+- The 'transaction' option is NOT supported in cluster-mode. In
+  non-cluster mode, the 'transaction' option is available when executing
+  pipelines. This wraps the pipeline commands with MULTI/EXEC commands,
+  effectively turning the pipeline commands into a single transaction
+  block, meaning that all commands are executed sequentially without any
+  interruptions from other clients. However, in cluster-mode this is not
+  possible, because commands are partitioned according to their
+  respective destination nodes. This means that we cannot turn the
+  pipeline commands into one transaction block, because in most cases
+  they are split up into several smaller pipelines.
+
+Publish / Subscribe
+-------------------
+
+redis-py includes a PubSub object that subscribes to channels and
+listens for new messages. Creating a PubSub object is easy.
+
+.. code:: pycon
+
+ >>> r = redis.Redis(...)
+ >>> p = r.pubsub()
+
+Once a PubSub instance is created, channels and patterns can be
+subscribed to.
+
+.. code:: pycon
+
+ >>> p.subscribe('my-first-channel', 'my-second-channel', ...)
+ >>> p.psubscribe('my-*', ...)
+
+The PubSub instance is now subscribed to those channels/patterns. The
+subscription confirmations can be seen by reading messages from the
+PubSub instance.
+
+.. code:: pycon
+
+ >>> p.get_message()
+ {'pattern': None, 'type': 'subscribe', 'channel': b'my-second-channel', 'data': 1}
+ >>> p.get_message()
+ {'pattern': None, 'type': 'subscribe', 'channel': b'my-first-channel', 'data': 2}
+ >>> p.get_message()
+ {'pattern': None, 'type': 'psubscribe', 'channel': b'my-*', 'data': 3}
+
+Every message read from a PubSub instance will be a dictionary with the
+following keys.
+
+- **type**: One of the following: 'subscribe', 'unsubscribe',
+ 'psubscribe', 'punsubscribe', 'message', 'pmessage'
+- **channel**: The channel [un]subscribed to or the channel a message
+ was published to
+- **pattern**: The pattern that matched a published message's channel.
+ Will be None in all cases except for 'pmessage' types.
+- **data**: The message data. With [un]subscribe messages, this value
+ will be the number of channels and patterns the connection is
+ currently subscribed to. With [p]message messages, this value will be
+ the actual published message.
+
+Let's send a message now.
+
+.. code:: pycon
+
+    # the publish method returns the number of matching channel and pattern
+ # subscriptions. 'my-first-channel' matches both the 'my-first-channel'
+ # subscription and the 'my-*' pattern subscription, so this message will
+ # be delivered to 2 channels/patterns
+ >>> r.publish('my-first-channel', 'some data')
+ 2
+ >>> p.get_message()
+ {'channel': b'my-first-channel', 'data': b'some data', 'pattern': None, 'type': 'message'}
+ >>> p.get_message()
+ {'channel': b'my-first-channel', 'data': b'some data', 'pattern': b'my-*', 'type': 'pmessage'}
+
+Unsubscribing works just like subscribing. If no arguments are passed to
+[p]unsubscribe, all channels or patterns will be unsubscribed from.
+
+.. code:: pycon
+
+ >>> p.unsubscribe()
+ >>> p.punsubscribe('my-*')
+ >>> p.get_message()
+ {'channel': b'my-second-channel', 'data': 2, 'pattern': None, 'type': 'unsubscribe'}
+ >>> p.get_message()
+ {'channel': b'my-first-channel', 'data': 1, 'pattern': None, 'type': 'unsubscribe'}
+ >>> p.get_message()
+ {'channel': b'my-*', 'data': 0, 'pattern': None, 'type': 'punsubscribe'}
+
+redis-py also allows you to register callback functions to handle
+published messages. Message handlers take a single argument, the
+message, which is a dictionary just like the examples above. To
+subscribe to a channel or pattern with a message handler, pass the
+channel or pattern name as a keyword argument with its value being the
+callback function.
+
+When a message is read on a channel or pattern with a message handler,
+the message dictionary is created and passed to the message handler. In
+this case, a None value is returned from get_message() since the message
+was already handled.
+
+.. code:: pycon
+
+ >>> def my_handler(message):
+ ... print('MY HANDLER: ', message['data'])
+ >>> p.subscribe(**{'my-channel': my_handler})
+ # read the subscribe confirmation message
+ >>> p.get_message()
+ {'pattern': None, 'type': 'subscribe', 'channel': b'my-channel', 'data': 1}
+ >>> r.publish('my-channel', 'awesome data')
+ 1
+    # for the message handler to work, we need to tell the instance to read data.
+ # this can be done in several ways (read more below). we'll just use
+ # the familiar get_message() function for now
+ >>> message = p.get_message()
+ MY HANDLER: awesome data
+ # note here that the my_handler callback printed the string above.
+ # `message` is None because the message was handled by our handler.
+ >>> print(message)
+ None
+
+If your application is not interested in the (sometimes noisy)
+subscribe/unsubscribe confirmation messages, you can ignore them by
+passing ignore_subscribe_messages=True to r.pubsub(). This will cause
+all subscribe/unsubscribe messages to be read, but they won't bubble up
+to your application.
+
+.. code:: pycon
+
+ >>> p = r.pubsub(ignore_subscribe_messages=True)
+ >>> p.subscribe('my-channel')
+ >>> p.get_message() # hides the subscribe message and returns None
+ >>> r.publish('my-channel', 'my data')
+ 1
+ >>> p.get_message()
+ {'channel': b'my-channel', 'data': b'my data', 'pattern': None, 'type': 'message'}
+
+There are three different strategies for reading messages.
+
+The examples above have been using pubsub.get_message(). Behind the
+scenes, get_message() uses the system's 'select' module to quickly poll
+the connection's socket. If there's data available to be read,
+get_message() will read it, format the message and return it or pass it
+to a message handler. If there's no data to be read, get_message() will
+immediately return None. This makes it trivial to integrate into an
+existing event loop inside your application.
+
+.. code:: pycon
+
+ >>> while True:
+    ...     message = p.get_message()
+    ...     if message:
+    ...         # do something with the message
+    ...         print(message)
+    ...     time.sleep(0.001)  # be nice to the system :)
+
+Older versions of redis-py only read messages with pubsub.listen().
+listen() is a generator that blocks until a message is available. If
+your application doesn't need to do anything else but receive and act on
+messages received from Redis, listen() is an easy way to get up and
+running.
+
+.. code:: pycon
+
+    >>> for message in p.listen():
+    ...     # do something with the message
+    ...     print(message)
+
+The third option runs an event loop in a separate thread.
+pubsub.run_in_thread() creates a new thread and starts the event loop.
+The thread object is returned to the caller of run_in_thread(). The
+caller can use the thread.stop() method to shut down the event loop and
+thread. Behind the scenes, this is simply a wrapper around get_message()
+that runs in a separate thread, essentially creating a tiny non-blocking
+event loop for you. run_in_thread() takes an optional sleep_time
+argument. If specified, the event loop will call time.sleep() with the
+value in each iteration of the loop.
+
+Note: Since we're running in a separate thread, there's no way to handle
+messages that aren't automatically handled with registered message
+handlers. Therefore, redis-py prevents you from calling run_in_thread()
+if you're subscribed to patterns or channels that don't have message
+handlers attached.
+
+.. code:: pycon
+
+ >>> p.subscribe(**{'my-channel': my_handler})
+ >>> thread = p.run_in_thread(sleep_time=0.001)
+ # the event loop is now running in the background processing messages
+ # when it's time to shut it down...
+ >>> thread.stop()
+
+run_in_thread also supports an optional exception handler, which lets
+you catch exceptions that occur within the worker thread and handle them
+appropriately. The exception handler will take as arguments the
+exception itself, the pubsub object, and the worker thread returned by
+run_in_thread.
+
+.. code:: pycon
+
+ >>> p.subscribe(**{'my-channel': my_handler})
+ >>> def exception_handler(ex, pubsub, thread):
+    ...     print(ex)
+    ...     thread.stop()
+    ...     thread.join(timeout=1.0)
+    ...     pubsub.close()
+ >>> thread = p.run_in_thread(exception_handler=exception_handler)
+
+A PubSub object adheres to the same encoding semantics as the client
+instance it was created from. Any channel or pattern that's a unicode
+string will be encoded using the charset specified on the client before
+being sent to Redis. If the client's decode_responses flag is set to
+False (the default), the 'channel', 'pattern' and 'data' values in
+message dictionaries will be byte strings (bytes). If the client's
+decode_responses is True, then the 'channel', 'pattern' and 'data'
+values will be automatically decoded to unicode strings using the
+client's charset.
+
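+With decode_responses=True, for example, the same pub/sub flow from
+above returns strings instead of bytes (a quick sketch):
+
+.. code:: pycon
+
+    >>> r = redis.Redis(decode_responses=True)
+    >>> p = r.pubsub(ignore_subscribe_messages=True)
+    >>> p.subscribe('my-channel')
+    >>> r.publish('my-channel', 'my data')
+    1
+    >>> p.get_message()
+    {'channel': 'my-channel', 'data': 'my data', 'pattern': None, 'type': 'message'}
+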
+PubSub objects remember what channels and patterns they are subscribed
+to. In the event of a disconnection such as a network error or timeout,
+the PubSub object will re-subscribe to all prior channels and patterns
+when reconnecting. Messages that were published while the client was
+disconnected cannot be delivered. When you're finished with a PubSub
+object, call its .close() method to shut down the connection.
+
+.. code:: pycon
+
+ >>> p = r.pubsub()
+ >>> ...
+ >>> p.close()
+
+The PUBSUB set of subcommands CHANNELS, NUMSUB and NUMPAT are also
+supported:
+
+.. code:: pycon
+
+ >>> r.pubsub_channels()
+ [b'foo', b'bar']
+ >>> r.pubsub_numsub('foo', 'bar')
+ [(b'foo', 9001), (b'bar', 42)]
+ >>> r.pubsub_numsub('baz')
+ [(b'baz', 0)]
+ >>> r.pubsub_numpat()
+ 1204
+
+Monitor
+~~~~~~~
+
+redis-py includes a Monitor object that streams every command processed
+by the Redis server. Use listen() on the Monitor object to block until a
+command is received.
+
+.. code:: pycon
+
+ >>> r = redis.Redis(...)
+    >>> with r.monitor() as m:
+    ...     for command in m.listen():
+    ...         print(command)
diff --git a/docs/backoff.rst b/docs/backoff.rst
new file mode 100644
index 0000000000..c5ab01ab03
--- /dev/null
+++ b/docs/backoff.rst
@@ -0,0 +1,7 @@
+.. _backoff-label:
+
+Backoff
+#############
+
+.. automodule:: redis.backoff
+ :members:
\ No newline at end of file
diff --git a/docs/clustering.rst b/docs/clustering.rst
new file mode 100644
index 0000000000..34cb7f1f69
--- /dev/null
+++ b/docs/clustering.rst
@@ -0,0 +1,242 @@
+Clustering
+==========
+
+redis-py now supports cluster mode and provides a client for `Redis
+Cluster <https://redis.io/topics/cluster-tutorial>`__.
+
+The cluster client is based on Grokzen's
+`redis-py-cluster <https://github.com/Grokzen/redis-py-cluster>`__, adds
+bug fixes on top of it, and now supersedes that library. Support for
+these changes is thanks to his contributions.
+
+To learn more about Redis Cluster, see the `Redis Cluster
+specifications <https://redis.io/topics/cluster-spec>`__.
+
+`Creating clusters <#creating-clusters>`__ \| `Specifying Target
+Nodes <#specifying-target-nodes>`__ \| `Multi-key
+Commands <#multi-key-commands>`__ \| `Known PubSub
+Limitations <#known-pubsub-limitations>`__
+
+Creating clusters
+-----------------
+
+Connecting redis-py to a Redis Cluster requires, at a minimum, a single
+node for cluster discovery. There are multiple ways in which a cluster
+instance can be created:
+
+- Using 'host' and 'port' arguments:
+
+.. code:: pycon
+
+ >>> from redis.cluster import RedisCluster as Redis
+ >>> rc = Redis(host='localhost', port=6379)
+ >>> print(rc.get_nodes())
+    [[host=127.0.0.1,port=6379,name=127.0.0.1:6379,server_type=primary,redis_connection=Redis<ConnectionPool<Connection<host=127.0.0.1,port=6379,db=0>>>], [host=127.0.0.1,port=6378,name=127.0.0.1:6378,server_type=primary,redis_connection=Redis<ConnectionPool<Connection<host=127.0.0.1,port=6378,db=0>>>], [host=127.0.0.1,port=6377,name=127.0.0.1:6377,server_type=replica,redis_connection=Redis<ConnectionPool<Connection<host=127.0.0.1,port=6377,db=0>>>]]
+
+- Using the Redis URL specification:
+
+.. code:: pycon
+
+ >>> from redis.cluster import RedisCluster as Redis
+ >>> rc = Redis.from_url("redis://localhost:6379/0")
+
+- Directly, via the ClusterNode class:
+
+.. code:: pycon
+
+ >>> from redis.cluster import RedisCluster as Redis
+ >>> from redis.cluster import ClusterNode
+ >>> nodes = [ClusterNode('localhost', 6379), ClusterNode('localhost', 6378)]
+ >>> rc = Redis(startup_nodes=nodes)
+
+When a RedisCluster instance is created, it first attempts to establish
+a connection to one of the provided startup nodes. If none of the
+startup nodes are reachable, a 'RedisClusterException' will be thrown.
+After a connection to one of the cluster's nodes is established, the
+RedisCluster instance will be initialized with 3 caches: a slots cache
+that maps each of the 16384 slots to the node(s) handling them, a nodes
+cache that contains ClusterNode objects (name, host, port, redis
+connection) for all of the cluster's nodes, and a commands cache that
+contains all of the commands supported by the server, retrieved from
+the Redis 'COMMAND' output. See *RedisCluster specific options* below
+for more.
+
+A RedisCluster instance can be used directly to execute Redis commands.
+When a command is executed through the cluster instance, the target
+node(s) will be internally determined. When using a key-based command,
+the target node will be the node that holds the key's slot. Cluster
+management commands and other commands that are not key-based have a
+parameter called 'target_nodes' where you can specify which nodes to
+execute the command on. In the absence of target_nodes, the command
+will be executed on the default cluster node. As part of cluster
+instance initialization, the cluster's default node is randomly
+selected from the cluster's primaries, and will be updated upon
+reinitialization. Using rc.get_default_node(), you can get the
+cluster's default node, or you can change it with 'set_default_node'.
+
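+For example (a brief sketch; which node is the default depends on your
+cluster):
+
+.. code:: pycon
+
+    >>> # inspect the current default node
+    >>> rc.get_default_node()
+    >>> # make the first primary the new default node
+    >>> rc.set_default_node(rc.get_primaries()[0])
+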
+The 'target_nodes' parameter is explained in the following section,
+'Specifying Target Nodes'.
+
+.. code:: pycon
+
+ >>> # target-nodes: the node that holds 'foo1's key slot
+ >>> rc.set('foo1', 'bar1')
+ >>> # target-nodes: the node that holds 'foo2's key slot
+ >>> rc.set('foo2', 'bar2')
+ >>> # target-nodes: the node that holds 'foo1's key slot
+ >>> print(rc.get('foo1'))
+    b'bar1'
+ >>> # target-node: default-node
+ >>> print(rc.keys())
+ [b'foo1']
+ >>> # target-node: default-node
+ >>> rc.ping()
+
+Specifying Target Nodes
+-----------------------
+
+As mentioned above, all non key-based RedisCluster commands accept the
+kwarg parameter 'target_nodes' that specifies the node/nodes that the
+command should be executed on. The best practice is to specify target
+nodes using the RedisCluster class's node flags: PRIMARIES, REPLICAS,
+ALL_NODES, RANDOM. When a nodes flag is passed along with a command, it
+will be internally resolved to the relevant node(s). If the cluster's
+node topology changes during the execution of a command, the client
+will be able to resolve the nodes flag again with the new topology and
+attempt to retry executing the command.
+
+.. code:: pycon
+
+ >>> from redis.cluster import RedisCluster as Redis
+ >>> # run cluster-meet command on all of the cluster's nodes
+ >>> rc.cluster_meet('127.0.0.1', 6379, target_nodes=Redis.ALL_NODES)
+ >>> # ping all replicas
+ >>> rc.ping(target_nodes=Redis.REPLICAS)
+ >>> # ping a random node
+ >>> rc.ping(target_nodes=Redis.RANDOM)
+ >>> # get the keys from all cluster nodes
+ >>> rc.keys(target_nodes=Redis.ALL_NODES)
+ [b'foo1', b'foo2']
+ >>> # execute bgsave in all primaries
+ >>> rc.bgsave(Redis.PRIMARIES)
+
+You could also pass ClusterNodes directly if you want to execute a
+command on a specific node / node group that isn't addressed by the
+nodes flag. However, if the command execution fails due to cluster
+topology changes, a retry attempt will not be made, since the passed
+target node/s may no longer be valid, and the relevant cluster or
+connection error will be returned.
+
+.. code:: pycon
+
+ >>> node = rc.get_node('localhost', 6379)
+ >>> # Get the keys only for that specific node
+ >>> rc.keys(target_nodes=node)
+ >>> # get Redis info from a subset of primaries
+ >>> subset_primaries = [node for node in rc.get_primaries() if node.port > 6378]
+ >>> rc.info(target_nodes=subset_primaries)
+
+In addition, the RedisCluster instance can query the Redis instance of a
+specific node and execute commands on that node directly. The Redis
+client, however, does not handle cluster failures and retries.
+
+.. code:: pycon
+
+ >>> cluster_node = rc.get_node(host='localhost', port=6379)
+ >>> print(cluster_node)
+    [host=127.0.0.1,port=6379,name=127.0.0.1:6379,server_type=primary,redis_connection=Redis<ConnectionPool<Connection<host=127.0.0.1,port=6379,db=0>>>]
+ >>> r = cluster_node.redis_connection
+ >>> r.client_list()
+ [{'id': '276', 'addr': '127.0.0.1:64108', 'fd': '16', 'name': '', 'age': '0', 'idle': '0', 'flags': 'N', 'db': '0', 'sub': '0', 'psub': '0', 'multi': '-1', 'qbuf': '26', 'qbuf-free': '32742', 'argv-mem': '10', 'obl': '0', 'oll': '0', 'omem': '0', 'tot-mem': '54298', 'events': 'r', 'cmd': 'client', 'user': 'default'}]
+ >>> # Get the keys only for that specific node
+ >>> r.keys()
+ [b'foo1']
+
+Multi-key Commands
+------------------
+
+Redis supports multi-key commands in Cluster Mode, such as Set type
+unions or intersections, mset and mget, as long as the keys all hash to
+the same slot. By using the RedisCluster client, you can use the known
+functions (e.g. mget, mset) to perform an atomic multi-key operation.
+However, you must ensure all keys are mapped to the same slot, otherwise
+a RedisClusterException will be thrown. Redis Cluster implements a
+concept called hash tags that can be used in order to force certain keys
+to be stored in the same hash slot, see `Keys hash
+tag <https://redis.io/topics/cluster-spec#keys-hash-tags>`__. You can
+also use the nonatomic variant of some multi-key operations (e.g.
+mset_nonatomic, mget_nonatomic), and pass keys that aren't mapped to the
+same slot. The client will then map the keys to the relevant slots,
+sending the commands to the slots' node owners. Non-atomic operations
+batch the keys according to their hash value, and then each batch is
+sent separately to the slot's owner.
+
+.. code:: pycon
+
+ # Atomic operations can be used when all keys are mapped to the same slot
+ >>> rc.mset({'{foo}1': 'bar1', '{foo}2': 'bar2'})
+ >>> rc.mget('{foo}1', '{foo}2')
+ [b'bar1', b'bar2']
+    # Non-atomic multi-key operations split the keys into different slots
+    >>> rc.mset_nonatomic({'foo': 'value1', 'bar': 'value2', 'zzz': 'value3'})
+ >>> rc.mget_nonatomic('foo', 'bar', 'zzz')
+ [b'value1', b'value2', b'value3']
+
+**Cluster PubSub:**
+
+When a ClusterPubSub instance is created without specifying a node, a
+single node will be transparently chosen for the pubsub connection on
+the first command execution. The node will be determined by:
+
+1. Hashing the channel name in the request to find its keyslot
+2. Selecting a node that handles the keyslot; if read_from_replicas is
+   set to true, a replica can be selected
+
+Known PubSub Limitations
+------------------------
+
+Pattern subscribe and publish do not currently work properly due to key
+slots. If we hash a pattern like fo\* we will receive a keyslot for that
+string but there are endless possibilities for channel names based on
+this pattern - unknowable in advance. This feature is not disabled but
+the commands are not currently recommended for use. See
+`redis-py-cluster
+documentation <https://redis-py-cluster.readthedocs.io/en/stable/>`__
+for more.
+
+.. code:: pycon
+
+ >>> p1 = rc.pubsub()
+ # p1 connection will be set to the node that holds 'foo' keyslot
+ >>> p1.subscribe('foo')
+ # p2 connection will be set to node 'localhost:6379'
+ >>> p2 = rc.pubsub(rc.get_node('localhost', 6379))
+
+**Read Only Mode**
+
+By default, Redis Cluster always returns MOVED redirection responses
+when accessing a replica node. You can overcome this limitation and
+scale read commands by triggering READONLY mode.
+
+To enable READONLY mode, pass read_from_replicas=True to the
+RedisCluster constructor. When set to true, read commands will be
+distributed between the primary and its replicas in a round-robin
+manner.
+
+READONLY mode can be set at runtime by calling the readonly() method
+with target_nodes='replicas', and read-write access can be restored by
+calling the readwrite() method.
+
+.. code:: pycon
+
+    >>> from redis.cluster import RedisCluster as Redis
+    >>> from redis.cluster import ClusterNode
+    >>> startup_nodes = [ClusterNode('localhost', 6379)]
+    # Use 'debug' log level to print the node that the command is executed on
+    >>> rc_readonly = Redis(startup_nodes=startup_nodes,
+    ...                     read_from_replicas=True)
+ >>> rc_readonly.set('{foo}1', 'bar1')
+ >>> for i in range(0, 4):
+ ... # Assigns read command to the slot's hosts in a Round-Robin manner
+ ... rc_readonly.get('{foo}1')
+ # set command would be directed only to the slot's primary node
+ >>> rc_readonly.set('{foo}2', 'bar2')
+ # reset READONLY flag
+ >>> rc_readonly.readwrite(target_nodes='replicas')
+ # now the get command would be directed only to the slot's primary node
+ >>> rc_readonly.get('{foo}1')
diff --git a/docs/commands.rst b/docs/commands.rst
new file mode 100644
index 0000000000..d35f290ace
--- /dev/null
+++ b/docs/commands.rst
@@ -0,0 +1,30 @@
+Redis Commands
+##############
+
+Core Commands
+*************
+
+The following functions can be used to replicate their equivalent `Redis command <https://redis.io/commands>`_. Generally they can be used as functions on your redis connection. For the simplest example, see below:
+
+Getting and setting data in redis::
+
+ import redis
+ r = redis.Redis(decode_responses=True)
+ r.set('mykey', 'thevalueofmykey')
+ r.get('mykey')
+
+.. autoclass:: redis.commands.core.CoreCommands
+ :inherited-members:
+
+Sentinel Commands
+*****************
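+
+These commands are often used alongside the Sentinel helper class. A
+brief sketch (the master name 'mymaster' and the localhost:26379
+address are assumptions for this example)::
+
+    from redis.sentinel import Sentinel
+
+    sentinel = Sentinel([('localhost', 26379)], socket_timeout=0.1)
+    master = sentinel.master_for('mymaster', socket_timeout=0.1)
+    master.set('foo', 'bar')
+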
+.. autoclass:: redis.commands.sentinel.SentinelCommands
+ :inherited-members:
+
+Redis Cluster Commands
+**********************
+
+The following `Redis commands <https://redis.io/commands>`_ are available within a `Redis Cluster <https://redis.io/topics/cluster-tutorial>`_. Generally they can be used as functions on your redis connection.
+
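+A short sketch (assuming a cluster node is reachable on localhost:6379)::
+
+    from redis.cluster import RedisCluster
+
+    rc = RedisCluster(host='localhost', port=6379)
+    rc.set('mykey', 'myvalue')
+    rc.get('mykey')
+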
+.. autoclass:: redis.commands.cluster.RedisClusterCommands
+ :inherited-members:
diff --git a/docs/conf.py b/docs/conf.py
index 8463eaaa16..cdbeb02c9a 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-#
# redis-py documentation build configuration file, created by
# sphinx-quickstart on Fri Feb 8 00:47:08 2013.
#
@@ -18,211 +16,246 @@
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.insert(0, os.path.abspath('.'))
+# sys.path.insert(0, os.path.abspath('.'))
+sys.path.append(os.path.abspath(os.path.pardir))
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
-#needs_sphinx = '1.0'
+# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = []
+extensions = [
+ "nbsphinx",
+ "sphinx_gallery.load_style",
+ "sphinx.ext.autodoc",
+ "sphinx_autodoc_typehints",
+ "sphinx.ext.doctest",
+ "sphinx.ext.viewcode",
+ "sphinx.ext.autosectionlabel",
+]
+
+# AutosectionLabel settings.
+# Uses a :