diff --git a/.coveragerc b/.coveragerc new file mode 100644 index 0000000000..9be06b93ca --- /dev/null +++ b/.coveragerc @@ -0,0 +1,2 @@ +[run] +source = redis diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000000..4dc633c1a6 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,4 @@ +**/__pycache__ +**/*.pyc +.coverage +.coverage.* diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000000..1af2323fe9 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1 @@ +doctests/* @dmaier-redislabs diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 3b8e7ba628..66ba87ec23 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -2,10 +2,11 @@ _Please make sure to review and check all of these items:_ -- [ ] Does `$ tox` pass with this change (including linting)? -- [ ] Does travis tests pass with this change (enable it first in your forked repo and wait for the travis build to finish)? +- [ ] Do tests and lints pass with this change? +- [ ] Do the CI tests pass with this change (enable it first in your forked repo and wait for the github action build to finish)? - [ ] Is the new or changed code fully tested? - [ ] Is a documentation update included (if this change modifies existing APIs, or introduces new ones)? +- [ ] Is there an example added to the examples folder (if applicable)? _NOTE: these things are not required to open a PR and can be done afterwards / while the PR is open._ diff --git a/.github/actions/run-tests/action.yml b/.github/actions/run-tests/action.yml new file mode 100644 index 0000000000..5f801c894b --- /dev/null +++ b/.github/actions/run-tests/action.yml @@ -0,0 +1,163 @@ +name: 'Run redis-py tests' +description: 'Runs redis-py tests against different Redis versions and configurations' +inputs: + python-version: + description: 'Python version to use for running tests' + default: '3.12' + parser-backend: + description: 'Parser backend to use: plain or hiredis' + required: true + redis-version: + description: 'Redis version to test against' + required: true + hiredis-version: + description: 'hiredis version to test against' + required: false + default: '>3.0.0' + hiredis-branch: + description: 'hiredis branch to test against' + required: false + default: 'master' + event-loop: + description: 'Event loop to use' + required: false + default: 'asyncio' +runs: + using: "composite" + steps: + - uses: actions/checkout@v4 + + - uses: actions/setup-python@v5 + with: + python-version: ${{ inputs.python-version }} + cache: 'pip' + + - uses: actions/checkout@v4 + if: ${{ inputs.parser-backend == 'hiredis' && inputs.hiredis-version == 'unstable' }} + with: + repository: redis/hiredis-py + submodules: true + path: hiredis-py + ref: ${{ inputs.hiredis-branch }} + + - name: Setup Test environment + env: + REDIS_VERSION: ${{ inputs.redis-version }} + CLIENT_LIBS_TEST_IMAGE_TAG: ${{ inputs.redis-version }} + run: | + set -e + + echo "::group::Installing dependencies" + pip install -r dev_requirements.txt + pip uninstall -y redis # uninstall Redis package installed via redis-entraid + pip install -e .[jwt] # install the working copy + if [ "${{inputs.parser-backend}}" == "hiredis" ]; then + if [[ "${{inputs.hiredis-version}}" == "unstable" ]]; then + echo "Installing unstable version of hiredis from local directory" + pip install -e ./hiredis-py + else + pip install "hiredis${{inputs.hiredis-version}}" + fi + echo "PARSER_BACKEND=$(echo 
"${{inputs.parser-backend}}_${{inputs.hiredis-version}}" | sed 's/[^a-zA-Z0-9]/_/g')" >> $GITHUB_ENV + else + echo "PARSER_BACKEND=${{inputs.parser-backend}}" >> $GITHUB_ENV + fi + echo "::endgroup::" + + echo "::group::Starting Redis servers" + redis_major_version=$(echo "$REDIS_VERSION" | grep -oP '^\d+') + echo "REDIS_MAJOR_VERSION=${redis_major_version}" >> $GITHUB_ENV + + if (( redis_major_version < 8 )); then + echo "Using redis-stack for module tests" + + # Mapping of redis version to stack version + declare -A redis_stack_version_mapping=( + ["7.4.4"]="rs-7.4.0-v5" + ["7.2.9"]="rs-7.2.0-v17" + ) + + if [[ -v redis_stack_version_mapping[$REDIS_VERSION] ]]; then + export CLIENT_LIBS_TEST_STACK_IMAGE_TAG=${redis_stack_version_mapping[$REDIS_VERSION]} + echo "REDIS_MOD_URL=redis://127.0.0.1:6479/0" >> $GITHUB_ENV + else + echo "Version not found in the mapping." + exit 1 + fi + + if (( redis_major_version < 7 )); then + export REDIS_STACK_EXTRA_ARGS="--tls-auth-clients optional --save ''" + export REDIS_EXTRA_ARGS="--tls-auth-clients optional --save ''" + fi + + invoke devenv --endpoints=all-stack + + else + echo "Using redis CE for module tests" + export CLIENT_LIBS_TEST_STACK_IMAGE_TAG=$REDIS_VERSION + echo "REDIS_MOD_URL=redis://127.0.0.1:6379" >> $GITHUB_ENV + invoke devenv --endpoints all + fi + + sleep 10 # time to settle + echo "::endgroup::" + shell: bash + + - name: Run tests + run: | + set -e + + run_tests() { + local protocol=$1 + local eventloop="" + + if [ "${{inputs.event-loop}}" == "uvloop" ]; then + eventloop="--uvloop" + fi + + echo "::group::RESP${protocol} standalone tests" + echo "REDIS_MOD_URL=${REDIS_MOD_URL}" + + if (( $REDIS_MAJOR_VERSION < 7 )) && [ "$protocol" == "3" ]; then + echo "Skipping module tests: Modules doesn't support RESP3 for Redis versions < 7" + invoke standalone-tests --redis-mod-url=${REDIS_MOD_URL} $eventloop --protocol="${protocol}" --extra-markers="not redismod and not cp_integration" + else + invoke standalone-tests --redis-mod-url=${REDIS_MOD_URL} $eventloop --protocol="${protocol}" + fi + + echo "::endgroup::" + + echo "::group::RESP${protocol} cluster tests" + invoke cluster-tests $eventloop --protocol=${protocol} + echo "::endgroup::" + } + + run_tests 2 "${{inputs.event-loop}}" + run_tests 3 "${{inputs.event-loop}}" + shell: bash + + - name: Debug + if: failure() + run: | + sudo apt-get install -y redis-tools + echo "Docker Containers:" + docker ps + echo "Cluster nodes:" + redis-cli -p 16379 CLUSTER NODES + shell: bash + + - name: Upload test results and profiling data + uses: actions/upload-artifact@v4 + with: + name: pytest-results-redis_${{inputs.redis-version}}-python_${{inputs.python-version}}-parser_${{env.PARSER_BACKEND}}-el_${{inputs.event-loop}} + path: | + *-results.xml + prof/** + profile_output* + if-no-files-found: error + retention-days: 10 + + - name: Upload codecov coverage + uses: codecov/codecov-action@v4 + with: + fail_ci_if_error: false diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000..ac71d74297 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,8 @@ +version: 2 +updates: + - package-ecosystem: "github-actions" + directory: "/" + labels: + - "maintenance" + schedule: + interval: "monthly" diff --git a/.github/release-drafter-config.yml b/.github/release-drafter-config.yml new file mode 100644 index 0000000000..4607da071c --- /dev/null +++ b/.github/release-drafter-config.yml @@ -0,0 +1,55 @@ +name-template: '$NEXT_MINOR_VERSION' +tag-template: 
'v$NEXT_MINOR_VERSION' +filter-by-commitish: true +commitish: master +autolabeler: + - label: 'maintenance' + files: + - '*.md' + - '.github/*' + - label: 'bug' + branch: + - '/bug-.+' + - label: 'maintenance' + branch: + - '/maintenance-.+' + - label: 'feature' + branch: + - '/feature-.+' +categories: + - title: '🔥 Breaking Changes' + labels: + - 'breakingchange' + - title: '🧪 Experimental Features' + labels: + - 'experimental' + - title: '🚀 New Features' + labels: + - 'feature' + - 'enhancement' + - title: '🐛 Bug Fixes' + labels: + - 'fix' + - 'bugfix' + - 'bug' + - 'BUG' + - title: '🧰 Maintenance' + labels: + - 'maintenance' + - 'dependencies' + - 'documentation' + - 'docs' + - 'testing' +change-template: '- $TITLE (#$NUMBER)' +exclude-labels: + - 'skip-changelog' +template: | + # Changes + + $CHANGES + + ## Contributors + We'd like to thank all the contributors who worked on this release! + + $CONTRIBUTORS + diff --git a/.github/spellcheck-settings.yml b/.github/spellcheck-settings.yml new file mode 100644 index 0000000000..96abbe6da8 --- /dev/null +++ b/.github/spellcheck-settings.yml @@ -0,0 +1,29 @@ +matrix: +- name: Markdown + expect_match: false + aspell: + lang: en + d: en_US + ignore-case: true + dictionary: + wordlists: + - .github/wordlist.txt + output: wordlist.dic + pipeline: + - pyspelling.filters.markdown: + markdown_extensions: + - markdown.extensions.extra: + - pyspelling.filters.html: + comments: false + attributes: + - alt + ignores: + - ':matches(code, pre)' + - code + - pre + - blockquote + - img + sources: + - '*.md' + - 'docs/*.rst' + - 'docs/*.ipynb' diff --git a/.github/wordlist.txt b/.github/wordlist.txt new file mode 100644 index 0000000000..0a69b9092a --- /dev/null +++ b/.github/wordlist.txt @@ -0,0 +1,163 @@ +APM +ARGV +BFCommands +balancer +CacheImpl +cancelling +CAS +CFCommands +CMSCommands +ClusterNode +ClusterNodes +ClusterPipeline +ClusterPubSub +ConnectionPool +config +CoreCommands +DatabaseConfig +DNS +EchoHealthCheck +EVAL +EVALSHA +failover +FQDN +Grokzen's +Healthcheck +HealthCheckPolicies +healthcheck +healthchecks +INCR +IOError +Instrumentations +JSONCommands +Jaeger +Ludovico +Magnocavallo +MultiDbConfig +MultiDBClient +McCurdy +NOSCRIPT +NoValidDatabaseException +NUMPAT +NUMPT +NUMSUB +OSS +OpenCensus +OpenTelemetry +OpenTracing +Otel +PubSub +READONLY +RediSearch +RedisBloom +RedisCluster +RedisClusterCommands +RedisClusterException +RedisClusters +RedisInstrumentor +RedisJSON +RedisTimeSeries +SHA +SLA +SearchCommands +SentinelCommands +SentinelConnectionPool +Sharded +Solovyov +SpanKind +Specfiying +StatusCode +TCP +TemporaryUnavailableException +TLS +TOPKCommands +TimeSeriesCommands +Uptrace +ValueError +WATCHed +WatchError +api +args +async +asyncio +autoclass +automodule +backoff +bdb +behaviour +bool +boolean +booleans +bysource +charset +del +dev +docstring +docstrings +eg +exc +firsttimersonly +fo +genindex +gmail +hiredis +http +idx +iff +ini +json +keyslot +keyspace +kwarg +kwargs +linters +localhost +lua +makeapullrequest +maxdepth +mget +microservice +microservices +mset +multikey +mykey +nonatomic +observability +opentelemetry +oss +performant +pmessage +png +pre +psubscribe +pubsub +punsubscribe +py +pypi +quickstart +readonly +readwrite +redis +redismodules +reinitialization +replicaof +repo +runtime +sedrik +sharded +ssl +str +stunnel +subcommands +thevalueofmykey +timeseries +toctree +topk +triaging +txt +un +unicode +url +virtualenv +www +yaml diff --git a/.github/workflows/codeql-analysis.yml
b/.github/workflows/codeql-analysis.yml new file mode 100644 index 0000000000..bd6e6b9132 --- /dev/null +++ b/.github/workflows/codeql-analysis.yml @@ -0,0 +1,68 @@ +# For most projects, this workflow file will not need changing; you simply need +# to commit it to your repository. +# +# You may wish to alter this file to override the set of languages analyzed, +# or to provide custom queries or build logic. +# +# ******** NOTE ******** +# We have attempted to detect the languages in your repository. Please check +# the `language` matrix defined below to confirm you have the correct set of +# supported CodeQL languages. +# +name: "CodeQL" + +on: + push: + branches: [ master ] + pull_request: + # The branches below must be a subset of the branches above + branches: [ master ] + +jobs: + analyze: + name: Analyze + runs-on: ubuntu-latest + permissions: + actions: read + contents: read + security-events: write + + strategy: + fail-fast: false + matrix: + language: [ 'python' ] + # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] + # Learn more about CodeQL language support at https://git.io/codeql-language-support + + steps: + - name: Checkout repository + uses: actions/checkout@v5 + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v4 + with: + languages: ${{ matrix.language }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. + # queries: ./path/to/local/query, your-org/your-repo/queries@main + + # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). + # If this step fails, then you should remove it and run the build manually (see below) + - name: Autobuild + uses: github/codeql-action/autobuild@v4 + + # ℹ️ Command-line programs to run using the OS shell. 
+ # 📚 https://git.io/JvXDl + + # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines + # and modify them (or add more) to build your code if your project + # uses a compiled language + + #- run: | + # make bootstrap + # make release + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v4 diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml new file mode 100644 index 0000000000..272f90838a --- /dev/null +++ b/.github/workflows/docs.yaml @@ -0,0 +1,47 @@ +name: Docs CI + +on: + push: + branches: + - master + - '[0-9].[0-9]' + pull_request: + branches: + - master + - '[0-9].[0-9]' + schedule: + - cron: '0 1 * * *' # nightly build + +concurrency: + group: ${{ github.event.pull_request.number || github.ref }}-docs + cancel-in-progress: true + +permissions: + contents: read # to fetch code (actions/checkout) + +jobs: + + build-docs: + name: Build docs + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v5 + - uses: actions/setup-python@v6 + with: + python-version: "3.10" + cache: 'pip' + - name: install deps + run: | + sudo apt-get update -yqq + sudo apt-get install -yqq pandoc make + - name: build docs + run: | + pip install -r dev_requirements.txt -r docs/requirements.txt + invoke build-docs + + - name: upload docs + uses: actions/upload-artifact@v5 + with: + name: redis-py-docs + path: | + docs/_build/html diff --git a/.github/workflows/hiredis-py-integration.yaml b/.github/workflows/hiredis-py-integration.yaml new file mode 100644 index 0000000000..a780c4d32c --- /dev/null +++ b/.github/workflows/hiredis-py-integration.yaml @@ -0,0 +1,66 @@ +name: Hiredis-py integration tests + +on: + workflow_dispatch: + inputs: + redis-py-branch: + description: 'redis-py branch to run tests on' + required: true + default: 'master' + hiredis-branch: + description: 'hiredis-py branch to run tests on' + required: true + default: 'master' + +concurrency: + group: ${{ github.event.pull_request.number || github.ref }}-hiredis-integration + cancel-in-progress: true + +permissions: + contents: read # to fetch code (actions/checkout) + +env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} + # this speeds up coverage with Python 3.12: https://github.com/nedbat/coveragepy/issues/1665 + COVERAGE_CORE: sysmon + CURRENT_CLIENT_LIBS_TEST_STACK_IMAGE_TAG: '8.0.2' + CURRENT_REDIS_VERSION: '8.0.2' + +jobs: + redis_version: + runs-on: ubuntu-latest + outputs: + CURRENT: ${{ env.CURRENT_REDIS_VERSION }} + steps: + - name: Compute outputs + run: | + echo "CURRENT=${{ env.CURRENT_REDIS_VERSION }}" >> $GITHUB_OUTPUT + + hiredis-tests: + runs-on: ubuntu-latest + needs: [redis_version] + timeout-minutes: 60 + strategy: + max-parallel: 15 + fail-fast: false + matrix: + redis-version: [ '${{ needs.redis_version.outputs.CURRENT }}' ] + python-version: [ '3.10', '3.14'] + parser-backend: [ 'hiredis' ] + hiredis-version: [ 'unstable' ] + event-loop: [ 'asyncio' ] + env: + ACTIONS_ALLOW_UNSECURE_COMMANDS: true + name: Redis ${{ matrix.redis-version }}; Python ${{ matrix.python-version }}; RESP Parser:${{matrix.parser-backend}} (${{ matrix.hiredis-version }}); EL:${{matrix.event-loop}} + steps: + - uses: actions/checkout@v5 + with: + ref: ${{ inputs.redis-py-branch }} + - name: Run tests + uses: ./.github/actions/run-tests + with: + python-version: ${{ matrix.python-version }} + parser-backend: ${{ matrix.parser-backend }} + redis-version: ${{ matrix.redis-version }} + hiredis-version: ${{ matrix.hiredis-version }} + hiredis-branch: ${{
inputs.hiredis-branch }} diff --git a/.github/workflows/install_and_test.sh b/.github/workflows/install_and_test.sh new file mode 100755 index 0000000000..a83cef4089 --- /dev/null +++ b/.github/workflows/install_and_test.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +set -e + +SUFFIX=$1 +if [ -z ${SUFFIX} ]; then + echo "Supply valid python package extension such as whl or tar.gz. Exiting." + exit 3 +fi + +script=`pwd`/${BASH_SOURCE[0]} +HERE=`dirname ${script}` +ROOT=`realpath ${HERE}/../..` + +cd ${ROOT} +DESTENV=${ROOT}/.venvforinstall +if [ -d ${DESTENV} ]; then + rm -rf ${DESTENV} +fi +python -m venv ${DESTENV} +source ${DESTENV}/bin/activate +pip install --upgrade --quiet pip +pip install --quiet -r dev_requirements.txt +pip uninstall -y redis # uninstall Redis package installed via redis-entraid +invoke devenv --endpoints=all-stack +invoke package + +# find packages +PKG=`ls ${ROOT}/dist/*.${SUFFIX}` +ls -l ${PKG} + +TESTDIR=${ROOT}/STAGETESTS +if [ -d ${TESTDIR} ]; then + rm -rf ${TESTDIR} +fi +mkdir ${TESTDIR} +cp -R ${ROOT}/tests ${TESTDIR}/tests +cd ${TESTDIR} + +# install, run tests +pip install ${PKG} +# Redis tests +pytest -m 'not onlycluster' --ignore=tests/test_scenario --ignore=tests/test_asyncio/test_scenario +# RedisCluster tests +CLUSTER_URL="redis://localhost:16379/0" +CLUSTER_SSL_URL="rediss://localhost:27379/0" +pytest -m 'not onlynoncluster and not redismod and not ssl' \ + --ignore=tests/test_scenario \ + --ignore=tests/test_asyncio/test_scenario \ + --redis-url="${CLUSTER_URL}" \ + --redis-ssl-url="${CLUSTER_SSL_URL}" diff --git a/.github/workflows/integration.yaml b/.github/workflows/integration.yaml new file mode 100644 index 0000000000..cb614d499e --- /dev/null +++ b/.github/workflows/integration.yaml @@ -0,0 +1,205 @@ +name: CI + +on: + push: + paths-ignore: + - 'docs/**' + - '**/*.rst' + - '**/*.md' + branches: + - master + - '[0-9].[0-9]' + pull_request: + branches: + - master + - '[0-9].[0-9]' + schedule: + - cron: '0 1 * * *' # nightly build + +concurrency: + group: ${{ github.event.pull_request.number || github.ref }}-integration + cancel-in-progress: true + +permissions: + contents: read # to fetch code (actions/checkout) + +env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} + # this speeds up coverage with Python 3.12: https://github.com/nedbat/coveragepy/issues/1665 + COVERAGE_CORE: sysmon + # patch releases get included in the base version image when they are published + # for example after 8.2.1 is published, 8.2 image contains 8.2.1 content + CURRENT_CLIENT_LIBS_TEST_STACK_IMAGE_TAG: '8.2' + CURRENT_REDIS_VERSION: '8.2' + +jobs: + dependency-audit: + name: Dependency audit + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v5 + - uses: pypa/gh-action-pip-audit@v1.0.8 + with: + inputs: dev_requirements.txt + ignore-vulns: | + GHSA-w596-4wvx-j9j6 # subversion related git pull, dependency for pytest. There is no impact here. 
+ + lint: + name: Code linters + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v5 + - uses: actions/setup-python@v6 + with: + python-version: "3.10" + cache: 'pip' + - name: run code linters + run: | + pip install -r dev_requirements.txt + pip uninstall -y redis # uninstall Redis package installed via redis-entraid + invoke linters + + redis_version: + runs-on: ubuntu-latest + outputs: + CURRENT: ${{ env.CURRENT_REDIS_VERSION }} + steps: + - name: Compute outputs + run: | + echo "CURRENT=${{ env.CURRENT_REDIS_VERSION }}" >> $GITHUB_OUTPUT + + tests: + runs-on: ubuntu-latest + timeout-minutes: 60 + needs: redis_version + strategy: + max-parallel: 15 + fail-fast: false + matrix: + redis-version: ['8.4-GA-pre.2', '${{ needs.redis_version.outputs.CURRENT }}', '8.0.2' ,'7.4.4', '7.2.9'] + python-version: ['3.10', '3.14'] + parser-backend: ['plain'] + event-loop: ['asyncio'] + env: + ACTIONS_ALLOW_UNSECURE_COMMANDS: true + name: Redis ${{ matrix.redis-version }}; Python ${{ matrix.python-version }}; RESP Parser:${{matrix.parser-backend}}; EL:${{matrix.event-loop}} + steps: + - uses: actions/checkout@v5 + - name: Run tests + uses: ./.github/actions/run-tests + with: + python-version: ${{ matrix.python-version }} + parser-backend: ${{ matrix.parser-backend }} + redis-version: ${{ matrix.redis-version }} + + python-compatibility-tests: + runs-on: ubuntu-latest + needs: [ redis_version ] + timeout-minutes: 60 + strategy: + max-parallel: 15 + fail-fast: false + matrix: + redis-version: [ '${{ needs.redis_version.outputs.CURRENT }}' ] + python-version: ['3.11', '3.12', '3.13', 'pypy-3.10', 'pypy-3.11'] + parser-backend: [ 'plain' ] + event-loop: [ 'asyncio' ] + env: + ACTIONS_ALLOW_UNSECURE_COMMANDS: true + name: Redis ${{ matrix.redis-version }}; Python ${{ matrix.python-version }}; RESP Parser:${{matrix.parser-backend}}; EL:${{matrix.event-loop}} + steps: + - uses: actions/checkout@v5 + - name: Run tests + uses: ./.github/actions/run-tests + with: + python-version: ${{ matrix.python-version }} + parser-backend: ${{ matrix.parser-backend }} + redis-version: ${{ matrix.redis-version }} + + hiredis-tests: + runs-on: ubuntu-latest + needs: [redis_version] + timeout-minutes: 60 + strategy: + max-parallel: 15 + fail-fast: false + matrix: + redis-version: [ '${{ needs.redis_version.outputs.CURRENT }}' ] + python-version: [ '3.10', '3.14'] + parser-backend: [ 'hiredis' ] + hiredis-version: [ '>=3.2.0', '<3.0.0' ] + event-loop: [ 'asyncio' ] + env: + ACTIONS_ALLOW_UNSECURE_COMMANDS: true + name: Redis ${{ matrix.redis-version }}; Python ${{ matrix.python-version }}; RESP Parser:${{matrix.parser-backend}} (${{ matrix.hiredis-version }}); EL:${{matrix.event-loop}} + steps: + - uses: actions/checkout@v5 + - name: Run tests + uses: ./.github/actions/run-tests + with: + python-version: ${{ matrix.python-version }} + parser-backend: ${{ matrix.parser-backend }} + redis-version: ${{ matrix.redis-version }} + hiredis-version: ${{ matrix.hiredis-version }} + + uvloop-tests: + runs-on: ubuntu-latest + needs: [redis_version] + timeout-minutes: 60 + strategy: + max-parallel: 15 + fail-fast: false + matrix: + redis-version: [ '${{ needs.redis_version.outputs.CURRENT }}' ] + python-version: [ '3.10', '3.14' ] + parser-backend: [ 'plain' ] + event-loop: [ 'uvloop' ] + env: + ACTIONS_ALLOW_UNSECURE_COMMANDS: true + name: Redis ${{ matrix.redis-version }}; Python ${{ matrix.python-version }}; RESP Parser:${{matrix.parser-backend}}; EL:${{matrix.event-loop}} + steps: + - uses: actions/checkout@v5 + - name: Run 
tests + uses: ./.github/actions/run-tests + with: + python-version: ${{ matrix.python-version }} + parser-backend: ${{ matrix.parser-backend }} + redis-version: ${{ matrix.redis-version }} + event-loop: ${{ matrix.event-loop }} + + build-and-test-package: + name: Validate building and installing the package + runs-on: ubuntu-latest + needs: [redis_version] + strategy: + fail-fast: false + matrix: + extension: ['tar.gz', 'whl'] + steps: + - uses: actions/checkout@v5 + - uses: actions/setup-python@v6 + with: + python-version: "3.10" + - name: Run installed unit tests + env: + CLIENT_LIBS_TEST_IMAGE_TAG: ${{ env.CURRENT_REDIS_VERSION }} + CLIENT_LIBS_TEST_STACK_IMAGE_TAG: ${{ env.CURRENT_CLIENT_LIBS_TEST_STACK_IMAGE_TAG }} + run: | + bash .github/workflows/install_and_test.sh ${{ matrix.extension }} + + install-package-from-commit: + name: Install package from commit hash + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + python-version: ['3.10', '3.11', '3.12', '3.13', '3.14', 'pypy-3.10', 'pypy-3.11'] + steps: + - uses: actions/checkout@v5 + - uses: actions/setup-python@v6 + with: + python-version: ${{ matrix.python-version }} + cache: 'pip' + - name: install from pip + run: | + pip install --quiet git+${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}.git@${GITHUB_SHA} diff --git a/.github/workflows/pypi-publish.yaml b/.github/workflows/pypi-publish.yaml new file mode 100644 index 0000000000..27e07ca901 --- /dev/null +++ b/.github/workflows/pypi-publish.yaml @@ -0,0 +1,34 @@ +name: Publish tag to Pypi + +on: + release: + types: [published] + workflow_dispatch: + +permissions: + contents: read # to fetch code (actions/checkout) + +jobs: + + build_and_package: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v5 + - name: install python + uses: actions/setup-python@v6 + with: + python-version: "3.10" + - run: pip install build twine + + - name: Build package + run: python -m build . + + - name: Basic package test prior to upload + run: | + twine check dist/* + + - name: Publish to Pypi + uses: pypa/gh-action-pypi-publish@release/v1 + with: + user: __token__ + password: ${{ secrets.PYPI_API_TOKEN }} diff --git a/.github/workflows/release-drafter.yml b/.github/workflows/release-drafter.yml new file mode 100644 index 0000000000..6695abfe4b --- /dev/null +++ b/.github/workflows/release-drafter.yml @@ -0,0 +1,24 @@ +name: Release Drafter + +on: + push: + # branches to consider in the event; optional, defaults to all + branches: + - master + +permissions: {} +jobs: + update_release_draft: + permissions: + pull-requests: write # to add label to PR (release-drafter/release-drafter) + contents: write # to create a github release (release-drafter/release-drafter) + + runs-on: ubuntu-latest + steps: + # Drafts your next Release notes as Pull Requests are merged into "master" + - uses: release-drafter/release-drafter@v6 + with: + # (Optional) specify config name to use, relative to .github/. 
Default: release-drafter.yml + config-name: release-drafter-config.yml + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/spellcheck.yml b/.github/workflows/spellcheck.yml new file mode 100644 index 0000000000..12902ba2e1 --- /dev/null +++ b/.github/workflows/spellcheck.yml @@ -0,0 +1,14 @@ +name: spellcheck +on: + pull_request: +jobs: + check-spelling: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v5 + - name: Check Spelling + uses: rojopolis/spellcheck-github-actions@0.53.0 + with: + config_path: .github/spellcheck-settings.yml + task_name: Markdown diff --git a/.github/workflows/stale-issues.yml b/.github/workflows/stale-issues.yml new file mode 100644 index 0000000000..04fce61c76 --- /dev/null +++ b/.github/workflows/stale-issues.yml @@ -0,0 +1,95 @@ +name: "Stale Issue Management" +on: + schedule: + # Run daily at midnight UTC + - cron: "0 0 * * *" + workflow_dispatch: # Allow manual triggering + +env: + # Default stale policy timeframes + DAYS_BEFORE_STALE: 365 + DAYS_BEFORE_CLOSE: 30 + + # Accelerated timeline for needs-information issues + NEEDS_INFO_DAYS_BEFORE_STALE: 30 + NEEDS_INFO_DAYS_BEFORE_CLOSE: 7 + +jobs: + stale: + runs-on: ubuntu-latest + steps: + # First step: Handle regular issues (excluding needs-information) + - name: Mark regular issues as stale + uses: actions/stale@v10 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + + # Default stale policy + days-before-stale: ${{ env.DAYS_BEFORE_STALE }} + days-before-close: ${{ env.DAYS_BEFORE_CLOSE }} + + # Explicit stale label configuration + stale-issue-label: "stale" + stale-pr-label: "stale" + + stale-issue-message: | + This issue has been automatically marked as stale due to inactivity. + It will be closed in 30 days if no further activity occurs. + If you believe this issue is still relevant, please add a comment to keep it open. + + close-issue-message: | + This issue has been automatically closed due to inactivity. + If you believe this issue is still relevant, please reopen it or create a new issue with updated information. + + # Exclude needs-information issues from this step + exempt-issue-labels: 'no-stale,needs-information' + + # Remove stale label when issue/PR becomes active again + remove-stale-when-updated: true + + # Apply to pull requests with same timeline + days-before-pr-stale: ${{ env.DAYS_BEFORE_STALE }} + days-before-pr-close: ${{ env.DAYS_BEFORE_CLOSE }} + + stale-pr-message: | + This pull request has been automatically marked as stale due to inactivity. + It will be closed in 30 days if no further activity occurs. + + close-pr-message: | + This pull request has been automatically closed due to inactivity. + If you would like to continue this work, please reopen the PR or create a new one. 
+ # Only exclude no-stale PRs (needs-information PRs follow standard timeline) + exempt-pr-labels: 'no-stale' + + # Second step: Handle needs-information issues with accelerated timeline + - name: Mark needs-information issues as stale + uses: actions/stale@v10 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + + # Accelerated timeline for needs-information + days-before-stale: ${{ env.NEEDS_INFO_DAYS_BEFORE_STALE }} + days-before-close: ${{ env.NEEDS_INFO_DAYS_BEFORE_CLOSE }} + + # Explicit stale label configuration + stale-issue-label: "stale" + + # Only target ISSUES with needs-information label (not PRs) + only-issue-labels: 'needs-information' + + stale-issue-message: | + This issue has been marked as stale because it requires additional information + that has not been provided for 30 days. It will be closed in 7 days if the + requested information is not provided. + + close-issue-message: | + This issue has been closed because the requested information was not provided within the specified timeframe. + If you can provide the missing information, please reopen this issue or create a new one. + + # Disable PR processing for this step + days-before-pr-stale: -1 + days-before-pr-close: -1 + + # Remove stale label when issue becomes active again + remove-stale-when-updated: true diff --git a/.gitignore b/.gitignore index 7de7594812..7184ad4e20 100644 --- a/.gitignore +++ b/.gitignore @@ -3,11 +3,27 @@ redis.egg-info build/ dist/ dump.rdb -/.tox _build vagrant/.vagrant .python-version .cache .eggs .idea +.vscode .coverage +env +venv +coverage.xml +.venv* +*.xml +.coverage* +prof +profile_output* +docker/stunnel/keys +/dockers/*/node-*/* +/dockers/*/tls/* +/dockers/standalone/ +/dockers/cluster/ +/dockers/replica/ +/dockers/sentinel/ +/dockers/redis-stack/ diff --git a/.mypy.ini b/.mypy.ini new file mode 100644 index 0000000000..942574e0f3 --- /dev/null +++ b/.mypy.ini @@ -0,0 +1,24 @@ +[mypy] +#, docs/examples, tests +files = redis +check_untyped_defs = True +follow_imports_for_stubs = True +#disallow_any_decorated = True +disallow_subclassing_any = True +#disallow_untyped_calls = True +disallow_untyped_decorators = True +#disallow_untyped_defs = True +implicit_reexport = False +no_implicit_optional = True +show_error_codes = True +strict_equality = True +warn_incomplete_stub = True +warn_redundant_casts = True +warn_unreachable = True +warn_unused_ignores = True +disallow_any_unimported = True +#warn_return_any = True + +[mypy-redis.asyncio.lock] +# TODO: Remove once locks have been rewritten +ignore_errors = True diff --git a/.readthedocs.yml b/.readthedocs.yml new file mode 100644 index 0000000000..0600219e1c --- /dev/null +++ b/.readthedocs.yml @@ -0,0 +1,15 @@ +version: 2 + +python: + install: + - requirements: docs/requirements.txt + - method: pip + path: .
+ +build: os: ubuntu-20.04 tools: python: "3.10" + +sphinx: + configuration: docs/conf.py diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index d530e4af3f..0000000000 --- a/.travis.yml +++ /dev/null @@ -1,42 +0,0 @@ -language: python -cache: pip -matrix: - include: - - env: TOXENV=flake8 - - python: 2.7 - env: TOXENV=py27-plain - - python: 2.7 - env: TOXENV=py27-hiredis - - python: 3.5 - env: TOXENV=py35-plain - - python: 3.5 - env: TOXENV=py35-hiredis - - python: 3.6 - env: TOXENV=py36-plain - - python: 3.6 - env: TOXENV=py36-hiredis - - python: 3.7 - env: TOXENV=py37-plain - - python: 3.7 - env: TOXENV=py37-hiredis - - python: 3.8 - env: TOXENV=py38-plain - - python: 3.8 - env: TOXENV=py38-hiredis - - python: pypy - env: TOXENV=pypy-plain - - python: pypy - env: TOXENV=pypy-hiredis - - python: pypy3 - env: TOXENV=pypy3-plain - - python: pypy3 - env: TOXENV=pypy3-hiredis -before_install: - - wget https://github.com/antirez/redis/archive/6.0-rc1.tar.gz && mkdir redis_install && tar -xvzf 6.0-rc1.tar.gz -C redis_install && cd redis_install/redis-6.0-rc1 && make && src/redis-server --daemonize yes && cd ../.. - - redis-cli info -install: - - pip install codecov tox -script: - - tox -after_success: - - "if [[ $TOXENV != 'flake8' ]]; then codecov; fi" diff --git a/CHANGES b/CHANGES index 98f3f73679..db57bdd54e 100644 --- a/CHANGES +++ b/CHANGES @@ -1,52 +1,55 @@ -* (in development) - * Restore try/except clauses to __del__ methods. These will be removed +This file contains only the change history before redis-py version 4.0.0. +After redis-py version 4.0.0, all changes can be found and tracked in the release notes published on GitHub. + +* 3.5.3 (June 1, 2020) + * Restore try/except clauses to __del__ methods. These will be removed in 4.0 when more explicit resource management is enforced. #1339 - * Update the master_address when Sentinels promote a new master. #847 - * Update SentinelConnectionPool to not forcefully disconnect other in-use + * Update the master_address when Sentinels promote a new master. #847 + * Update SentinelConnectionPool to not forcefully disconnect other in-use connections which can negatively affect threaded applications. #1345 * 3.5.2 (May 14, 2020) - * Tune the locking in ConnectionPool.get_connection so that the lock is + * Tune the locking in ConnectionPool.get_connection so that the lock is not held while waiting for the socket to establish and validate the TCP connection. * 3.5.1 (May 9, 2020) - * Fix for HSET argument validation to allow any non-None key. Thanks + * Fix for HSET argument validation to allow any non-None key. Thanks @AleksMat, #1337, #1341 * 3.5.0 (April 29, 2020) - * Removed exception trapping from __del__ methods. redis-py objects that + * Removed exception trapping from __del__ methods. redis-py objects that hold various resources implement __del__ cleanup methods to release those resources when the object goes out of scope. This provides a fallback for when these objects aren't explicitly closed by user code. Prior to this change any errors encountered in closing these resources would be hidden from the user. Thanks @jdufresne. #1281 - * Expanded support for connection strings specifying a username connecting + * Expanded support for connection strings specifying a username connecting to pre-v6 servers. #1274 - * Optimized Lock's blocking_timeout and sleep. If the lock cannot be + * Optimized Lock's blocking_timeout and sleep.
If the lock cannot be acquired and the sleep value would cause the loop to sleep beyond blocking_timeout, fail immediately. Thanks @clslgrnc. #1263 - * Added support for passing Python memoryviews to Redis command args that + * Added support for passing Python memoryviews to Redis command args that expect strings or bytes. The memoryview instance is sent directly to the socket such that there are zero copies made of the underlying data during command packing. Thanks @Cody-G. #1265, #1285 - * HSET command now can accept multiple pairs. HMSET has been marked as + * HSET command now can accept multiple pairs. HMSET has been marked as deprecated now. Thanks to @laixintao #1271 - * Don't manually DISCARD when encountering an ExecAbortError. + * Don't manually DISCARD when encountering an ExecAbortError. Thanks @nickgaya, #1300/#1301 - * Reset the watched state of pipelines after calling exec. This saves + * Reset the watched state of pipelines after calling exec. This saves a roundtrip to the server by not having to call UNWATCH within Pipeline.reset(). Thanks @nickgaya, #1299/#1302 - * Added the KEEPTTL option for the SET command. Thanks + * Added the KEEPTTL option for the SET command. Thanks @laixintao #1304/#1280 - * Added the MEMORY STATS command. #1268 - * Lock.extend() now has a new option, `replace_ttl`. When False (the + * Added the MEMORY STATS command. #1268 + * Lock.extend() now has a new option, `replace_ttl`. When False (the default), Lock.extend() adds the `additional_time` to the lock's existing TTL. When replace_ttl=True, the lock's existing TTL is replaced with the value of `additional_time`. - * Add testing and support for PyPy. + * Add testing and support for PyPy. * 3.4.1 - * Move the username argument in the Redis and Connection classes to the + * Move the username argument in the Redis and Connection classes to the end of the argument list. This helps those poor souls that specify all their connection options as non-keyword arguments. #1276 - * Prior to ACL support, redis-py ignored the username component of + * Prior to ACL support, redis-py ignored the username component of Connection URLs. With ACL support, usernames are no longer ignored and are used to authenticate against an ACL rule. Some cloud vendors with managed Redis instances (like Heroku) provide connection URLs with a @@ -54,33 +57,33 @@ username to Redis servers < 6.0.0 results in an error. Attempt to detect this condition and retry the AUTH command with only the password such that authentication continues to work for these users. #1274 - * Removed the __eq__ hooks to Redis and ConnectionPool that were added + * Removed the __eq__ hooks to Redis and ConnectionPool that were added in 3.4.0. This ended up being a bad idea as two separate connection pools could be considered equal yet manage a completely separate set of connections. * 3.4.0 - * Allow empty pipelines to be executed if there are WATCHed keys. + * Allow empty pipelines to be executed if there are WATCHed keys. This is a convenient way to test if any of the watched keys changed without actually running any other commands. Thanks @brianmaissy. #1233, #1234 - * Removed support for end of life Python 3.4. - * Added support for all ACL commands in Redis 6. Thanks @IAmATeaPot418 + * Removed support for end of life Python 3.4. + * Added support for all ACL commands in Redis 6. Thanks @IAmATeaPot418 for helping. - * Pipeline instances now always evaluate to True. Prior to this change, + * Pipeline instances now always evaluate to True.
Prior to this change, pipeline instances relied on __len__ for boolean evaluation which meant that pipelines with no commands on the stack would be considered False. #994 - * Client instances and Connection pools now support a 'client_name' + * Client instances and Connection pools now support a 'client_name' argument. If supplied, all connections created will call CLIENT SETNAME as soon as the connection is opened. Thanks to @Habbie for supplying the basis of this change. #802 - * Added the 'ssl_check_hostname' argument to specify whether SSL + * Added the 'ssl_check_hostname' argument to specify whether SSL connections should require the server hostname to match the hostname specified in the SSL cert. By default 'ssl_check_hostname' is False for backwards compatibility. #1196 - * Slightly optimized command packing. Thanks @Deneby67. #1255 - * Added support for the TYPE argument to SCAN. Thanks @netocp. #1220 - * Better thread and fork safety in ConnectionPool and + * Slightly optimized command packing. Thanks @Deneby67. #1255 + * Added support for the TYPE argument to SCAN. Thanks @netocp. #1220 + * Better thread and fork safety in ConnectionPool and BlockingConnectionPool. Added better locking to synchronize critical sections rather than relying on CPython-specific implementation details relating to atomic operations. Adjusted how the pools identify and @@ -88,636 +91,636 @@ raised by child processes in the very unlikely chance that a deadlock is encountered. Thanks @gmbnomis, @mdellweg, @yht804421715. #1270, #1138, #1178, #906, #1262 - * Added __eq__ hooks to the Redis and ConnectionPool classes. + * Added __eq__ hooks to the Redis and ConnectionPool classes. Thanks @brainix. #1240 * 3.3.11 - * Further fix for the SSLError -> TimeoutError mapping to work + * Further fix for the SSLError -> TimeoutError mapping to work on obscure releases of Python 2.7. * 3.3.10 - * Fixed a potential error handling bug for the SSLError -> TimeoutError + * Fixed a potential error handling bug for the SSLError -> TimeoutError mapping introduced in 3.3.9. Thanks @zbristow. #1224 * 3.3.9 - * Mapped Python 2.7 SSLError to TimeoutError where appropriate. Timeouts + * Mapped Python 2.7 SSLError to TimeoutError where appropriate. Timeouts should now consistently raise TimeoutErrors on Python 2.7 for both unsecured and secured connections. Thanks @zbristow. #1222 * 3.3.8 - * Fixed MONITOR parsing to properly parse IPv6 client addresses, unix + * Fixed MONITOR parsing to properly parse IPv6 client addresses, unix socket connections and commands issued from Lua. Thanks @kukey. #1201 * 3.3.7 - * Fixed a regression introduced in 3.3.0 where socket.error exceptions + * Fixed a regression introduced in 3.3.0 where socket.error exceptions (or subclasses) could potentially be raised instead of redis.exceptions.ConnectionError. #1202 * 3.3.6 - * Fixed a regression in 3.3.5 that caused PubSub.get_message() to raise + * Fixed a regression in 3.3.5 that caused PubSub.get_message() to raise a socket.timeout exception when passing a timeout value. #1200 * 3.3.5 - * Fix an issue where socket.timeout errors could be handled by the wrong + * Fix an issue where socket.timeout errors could be handled by the wrong exception handler in Python 2.7. * 3.3.4 - * More specifically identify nonblocking read errors for both SSL and + * More specifically identify nonblocking read errors for both SSL and non-SSL connections. 3.3.1, 3.3.2 and 3.3.3 on Python 2.7 could potentially mask a ConnectionError. 
#1197 * 3.3.3 - * The SSL module in Python < 2.7.9 handles non-blocking sockets + * The SSL module in Python < 2.7.9 handles non-blocking sockets differently than 2.7.9+. This patch accommodates older versions. #1197 * 3.3.2 - * Further fixed a regression introduced in 3.3.0 involving SSL and + * Further fixed a regression introduced in 3.3.0 involving SSL and non-blocking sockets. #1197 * 3.3.1 - * Fixed a regression introduced in 3.3.0 involving SSL and non-blocking + * Fixed a regression introduced in 3.3.0 involving SSL and non-blocking sockets. #1197 * 3.3.0 - * Resolve a race condition with the PubSubWorkerThread. #1150 - * Cleanup socket read error messages. Thanks Vic Yu. #1159 - * Cleanup the Connection's selector correctly. Thanks Bruce Merry. #1153 - * Added a Monitor object to make working with MONITOR output easy. + * Resolve a race condition with the PubSubWorkerThread. #1150 + * Cleanup socket read error messages. Thanks Vic Yu. #1159 + * Cleanup the Connection's selector correctly. Thanks Bruce Merry. #1153 + * Added a Monitor object to make working with MONITOR output easy. Thanks Roey Prat #1033 - * Internal cleanup: Removed the legacy Token class which was necessary + * Internal cleanup: Removed the legacy Token class which was necessary with older versions of Python that are no longer supported. #1066 - * Response callbacks are now case insensitive. This allows users that + * Response callbacks are now case insensitive. This allows users that call Redis.execute_command() directly to pass lower-case command names and still get reasonable responses. #1168 - * Added support for hiredis-py 1.0.0 encoding error handling. This should + * Added support for hiredis-py 1.0.0 encoding error handling. This should make the PythonParser and the HiredisParser behave identically when encountering encoding errors. Thanks Brian Candler. #1161/#1162 - * All authentication errors now properly raise AuthenticationError. + * All authentication errors now properly raise AuthenticationError. AuthenticationError is now a subclass of ConnectionError, which will cause the connection to be disconnected and cleaned up appropriately. #923 - * Add READONLY and READWRITE commands. Thanks @theodesp. #1114 - * Remove selectors in favor of nonblocking sockets. Selectors had + * Add READONLY and READWRITE commands. Thanks @theodesp. #1114 + * Remove selectors in favor of nonblocking sockets. Selectors had issues in some environments including eventlet and gevent. This should resolve those issues with no other side effects. - * Fixed an issue with XCLAIM and previously claimed but not removed + * Fixed an issue with XCLAIM and previously claimed but not removed messages. Thanks @thomdask. #1192/#1191 - * Allow for single connection client instances. These instances + * Allow for single connection client instances. These instances are not thread safe but offer other benefits including a subtle performance increase. - * Added extensive health checks that keep the connections lively. + * Added extensive health checks that keep the connections lively. Passing the "health_check_interval=N" option to the Redis client class or to a ConnectionPool ensures that a round trip PING/PONG is successful before any command if the underlying connection has been idle for more than N seconds. ConnectionErrors and TimeoutErrors are automatically retried once for health checks.
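The 3.3.0 health check entry above amounts to a one-keyword change on the client. A minimal sketch (the host, port, key, and 30-second interval are arbitrary placeholder values):

    import redis

    # If this connection is idle for more than 30 seconds, redis-py performs
    # a PING/PONG round trip before sending the next command; ConnectionErrors
    # and TimeoutErrors raised by the health check are retried once.
    r = redis.Redis(host="localhost", port=6379, health_check_interval=30)
    r.set("greeting", "hello")

The same keyword argument can be passed to a ConnectionPool so that every connection the pool hands out is checked the same way.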
- * Changed the PubSubWorkerThread to use a threading.Event object rather + * Changed the PubSubWorkerThread to use a threading.Event object rather than a boolean to control the thread's life cycle. Thanks Timothy Rule. #1194/#1195. - * Fixed a bug in Pipeline error handling that would incorrectly retry + * Fixed a bug in Pipeline error handling that would incorrectly retry ConnectionErrors. * 3.2.1 - * Fix SentinelConnectionPool to work in multiprocess/forked environments. + * Fix SentinelConnectionPool to work in multiprocess/forked environments. * 3.2.0 - * Added support for `select.poll` to test whether data can be read + * Added support for `select.poll` to test whether data can be read on a socket. This should allow for significantly more connections to be used with pubsub. Fixes #486/#1115 - * Attempt to guarantee that the ConnectionPool hands out healthy + * Attempt to guarantee that the ConnectionPool hands out healthy connections. Healthy connections are those that have an established socket connection to the Redis server, are ready to accept a command and have no data available to read. Fixes #1127/#886 - * Use the socket.IPPROTO_TCP constant instead of socket.SOL_TCP. + * Use the socket.IPPROTO_TCP constant instead of socket.SOL_TCP. IPPROTO_TCP is available on more interpreters (Jython for instance). Thanks @Junnplus. #1130 - * Fixed a regression introduced in 3.0 that mishandles exceptions not + * Fixed a regression introduced in 3.0 that mishandles exceptions not derived from the base Exception class. KeyboardInterrupt and gevent.timeout are notable examples. Thanks Christian Fersch. #1128/#1129 - * Significant improvements to handling connections with forked processes. + * Significant improvements to handling connections with forked processes. Parent and child processes no longer trample on each others' connections. Thanks to Jay Rolette for the patch and highlighting this issue. #504/#732/#784/#863 - * PythonParser no longer closes the associated connection's socket. The + * PythonParser no longer closes the associated connection's socket. The connection itself will close the socket. #1108/#1085 * 3.1.0 - * Connection URLs must have one of the following schemes: + * Connection URLs must have one of the following schemes: redis://, rediss://, unix://. Thanks @jdupl123. #961/#969 - * Fixed an issue with retry_on_timeout logic that caused some TimeoutErrors + * Fixed an issue with retry_on_timeout logic that caused some TimeoutErrors to be retried. Thanks Aaron Yang. #1022/#1023 - * Added support for SNI for SSL. Thanks @oridistor and Roey Prat. #1087 - * Fixed ConnectionPool repr for pools with no connections. Thanks + * Added support for SNI for SSL. Thanks @oridistor and Roey Prat. #1087 + * Fixed ConnectionPool repr for pools with no connections. Thanks Cody Scott. #1043/#995 - * Fixed GEOHASH to return a None value when specifying a place that + * Fixed GEOHASH to return a None value when specifying a place that doesn't exist on the server. Thanks @guybe7. #1126 - * Fixed XREADGROUP to return an empty dictionary for messages that + * Fixed XREADGROUP to return an empty dictionary for messages that have been deleted but still exist in the unacknowledged queue. Thanks @xeizmendi. #1116 - * Added an owned method to Lock objects. owned returns a boolean + * Added an owned method to Lock objects. owned returns a boolean indicating whether the current lock instance still owns the lock. Thanks Dave Johansen. #1112 - * Allow lock.acquire() to accept an optional token argument.
If + * Allow lock.acquire() to accept an optional token argument. If provided, the token argument is used as the unique value used to claim the lock. Thankd Dave Johansen. #1112 - * Added a reacquire method to Lock objects. reacquire attempts to renew + * Added a reacquire method to Lock objects. reacquire attempts to renew the lock such that the timeout is extended to the same value that the lock was initially acquired with. Thanks Ihor Kalnytskyi. #1014 - * Stream names found within XREAD and XREADGROUP responses now properly + * Stream names found within XREAD and XREADGROUP responses now properly respect the decode_responses flag. - * XPENDING_RANGE now requires the user the specify the min, max and + * XPENDING_RANGE now requires the user the specify the min, max and count arguments. Newer versions of Redis prevent count from being infinite so it's left to the user to specify these values explicitly. - * ZADD now returns None when xx=True and incr=True and an element + * ZADD now returns None when xx=True and incr=True and an element is specified that doesn't exist in the sorted set. This matches what the server returns in this case. #1084 - * Added client_kill_filter that accepts various filters to identify + * Added client_kill_filter that accepts various filters to identify and kill clients. Thanks Theofanis Despoudis. #1098 - * Fixed a race condition that occurred when unsubscribing and + * Fixed a race condition that occurred when unsubscribing and resubscribing to the same channel or pattern in rapid succession. Thanks Marcin Raczyński. #764 - * Added a LockNotOwnedError that is raised when trying to extend or + * Added a LockNotOwnedError that is raised when trying to extend or release a lock that is no longer owned. This is a subclass of LockError so previous code should continue to work as expected. Thanks Joshua Harlow. #1095 - * Fixed a bug in GEORADIUS that forced decoding of places without + * Fixed a bug in GEORADIUS that forced decoding of places without respecting the decode_responses option. Thanks Bo Bayles. #1082 * 3.0.1 - * Fixed regression with UnixDomainSocketConnection caused by 3.0.0. + * Fixed regression with UnixDomainSocketConnection caused by 3.0.0. Thanks Jyrki Muukkonen - * Fixed an issue with the new asynchronous flag on flushdb and flushall. + * Fixed an issue with the new asynchronous flag on flushdb and flushall. Thanks rogeryen - * Updated Lock.locked() method to indicate whether *any* process has + * Updated Lock.locked() method to indicate whether *any* process has acquired the lock, not just the current one. This is in line with the behavior of threading.Lock. Thanks Alan Justino da Silva * 3.0.0 BACKWARDS INCOMPATIBLE CHANGES - * When using a Lock as a context manager and the lock fails to be acquired + * When using a Lock as a context manager and the lock fails to be acquired a LockError is now raised. This prevents the code block inside the context manager from being executed if the lock could not be acquired. - * Renamed LuaLock to Lock. - * Removed the pipeline based Lock implementation in favor of the LuaLock + * Renamed LuaLock to Lock. + * Removed the pipeline based Lock implementation in favor of the LuaLock implementation. - * Only bytes, strings and numbers (ints, longs and floats) are acceptable + * Only bytes, strings and numbers (ints, longs and floats) are acceptable for keys and values. Previously redis-py attempted to cast other types to str() and store the result. 
This caused much confusion and frustration when passing boolean values (cast to 'True' and 'False') or None values (cast to 'None'). It is now the user's responsibility to cast all key names and values to bytes, strings or numbers before passing the value to redis-py. - * The StrictRedis class has been renamed to Redis. StrictRedis will + * The StrictRedis class has been renamed to Redis. StrictRedis will continue to exist as an alias of Redis for the foreseeable future. - * The legacy Redis client class has been removed. It caused much confusion + * The legacy Redis client class has been removed. It caused much confusion to users. - * ZINCRBY arguments 'value' and 'amount' have swapped order to match the + * ZINCRBY arguments 'value' and 'amount' have swapped order to match the Redis server. The new argument order is: keyname, amount, value. - * MGET no longer raises an error if zero keys are passed in. Instead an + * MGET no longer raises an error if zero keys are passed in. Instead an empty list is returned. - * MSET and MSETNX now require all keys/values to be specified in a single + * MSET and MSETNX now require all keys/values to be specified in a single dictionary argument named mapping. This was changed to allow for future options to these commands. - * ZADD now requires all element names/scores be specified in a single + * ZADD now requires all element names/scores be specified in a single dictionary argument named mapping. This was required to allow the NX, XX, CH and INCR options to be specified. - * ssl_cert_reqs now has a default value of 'required'. This + * ssl_cert_reqs now has a default value of 'required'. This should make connecting to a remote Redis server over SSL more secure. Thanks u2mejc - * Removed support for EOL Python 2.6 and 3.3. Thanks jdufresne + * Removed support for EOL Python 2.6 and 3.3. Thanks jdufresne OTHER CHANGES - * Added missing DECRBY command. Thanks derek-dchu - * CLUSTER INFO and CLUSTER NODES responses are now properly decoded to + * Added missing DECRBY command. Thanks derek-dchu + * CLUSTER INFO and CLUSTER NODES responses are now properly decoded to strings. - * Added a 'locked()' method to Lock objects. This method returns True + * Added a 'locked()' method to Lock objects. This method returns True if the lock has been acquired and owned by the current process, otherwise False. - * EXISTS now supports multiple keys. Its return value is now the number + * EXISTS now supports multiple keys. Its return value is now the number of keys in the list that exist. - * Ensure all commands can accept key names as bytes. This fixes issues + * Ensure all commands can accept key names as bytes. This fixes issues with BLPOP, BRPOP and SORT. - * All errors resulting from bad user input are raised as DataError + * All errors resulting from bad user input are raised as DataError exceptions. DataError is a subclass of RedisError so this should be transparent to anyone previously catching these. - * Added support for NX, XX, CH and INCR options to ZADD - * Added support for the MIGRATE command - * Added support for the MEMORY USAGE and MEMORY PURGE commands. Thanks + * Added support for NX, XX, CH and INCR options to ZADD + * Added support for the MIGRATE command + * Added support for the MEMORY USAGE and MEMORY PURGE commands. Thanks Itamar Haber - * Added support for the 'asynchronous' argument to FLUSHDB and FLUSHALL + * Added support for the 'asynchronous' argument to FLUSHDB and FLUSHALL commands.
Thanks Itamar Haber - * Added support for the BITFIELD command. Thanks Charles Leifer and + * Added support for the BITFIELD command. Thanks Charles Leifer and Itamar Haber - * Improved performance on pipeline requests with large chunks of data. + * Improved performance on pipeline requests with large chunks of data. Thanks tzickel - * Fixed test suite to not fail if another client is connected to the + * Fixed test suite to not fail if another client is connected to the server the tests are running against. - * Added support for SWAPDB. Thanks Itamar Haber - * Added support for all STREAM commands. Thanks Roey Prat and Itamar Haber - * SHUTDOWN now accepts the 'save' and 'nosave' arguments. Thanks + * Added support for SWAPDB. Thanks Itamar Haber + * Added support for all STREAM commands. Thanks Roey Prat and Itamar Haber + * SHUTDOWN now accepts the 'save' and 'nosave' arguments. Thanks dwilliams-kenzan - * Added support for ZPOPMAX, ZPOPMIN, BZPOPMAX, BZPOPMIN. Thanks + * Added support for ZPOPMAX, ZPOPMIN, BZPOPMAX, BZPOPMIN. Thanks Itamar Haber - * Added support for the 'type' argument in CLIENT LIST. Thanks Roey Prat - * Added support for CLIENT PAUSE. Thanks Roey Prat - * Added support for CLIENT ID and CLIENT UNBLOCK. Thanks Itamar Haber - * GEODIST now returns a None value when referencing a place that does + * Added support for the 'type' argument in CLIENT LIST. Thanks Roey Prat + * Added support for CLIENT PAUSE. Thanks Roey Prat + * Added support for CLIENT ID and CLIENT UNBLOCK. Thanks Itamar Haber + * GEODIST now returns a None value when referencing a place that does not exist. Thanks qingping209 - * Added a ping() method to pubsub objects. Thanks krishan-carbon - * Fixed a bug with keys in the INFO dict that contained ':' symbols. + * Added a ping() method to pubsub objects. Thanks krishan-carbon + * Fixed a bug with keys in the INFO dict that contained ':' symbols. Thanks mzalimeni - * Fixed the select system call retry compatibility with Python 2.x. + * Fixed the select system call retry compatibility with Python 2.x. Thanks lddubeau - * max_connections is now a valid querystring argument for creating + * max_connections is now a valid querystring argument for creating connection pools from URLs. Thanks mmaslowskicc - * Added the UNLINK command. Thanks yozel - * Added socket_type option to Connection for configurability. + * Added the UNLINK command. Thanks yozel + * Added socket_type option to Connection for configurability. Thanks garlicnation - * Lock.do_acquire now atomically acquires the lock and sets the + * Lock.do_acquire now atomically acquires the lock and sets the expire value via set(nx=True, px=timeout). Thanks 23doors - * Added 'count' argument to SPOP. Thanks AlirezaSadeghi - * Fixed an issue parsing client_list responses that contained an '='. + * Added 'count' argument to SPOP. Thanks AlirezaSadeghi + * Fixed an issue parsing client_list responses that contained an '='. Thanks swilly22 * 2.10.6 - * Various performance improvements. Thanks cjsimpson - * Fixed a bug with SRANDMEMBER where the behavior for `number=0` did + * Various performance improvements. Thanks cjsimpson + * Fixed a bug with SRANDMEMBER where the behavior for `number=0` did not match the spec. Thanks Alex Wang - * Added HSTRLEN command. Thanks Alexander Putilin - * Added the TOUCH command. Thanks Anis Jonischkeit - * Remove unnecessary calls to the server when registering Lua scripts. + * Added HSTRLEN command. Thanks Alexander Putilin + * Added the TOUCH command.
+ * Remove unnecessary calls to the server when registering Lua scripts. Thanks Ben Greenberg - * SET's EX and PX arguments now allow values of zero. Thanks huangqiyin - * Added PUBSUB {CHANNELS, NUMPAT, NUMSUB} commands. Thanks Angus Pearson - * PubSub connections that encounter `InterruptedError`s now + * SET's EX and PX arguments now allow values of zero. Thanks huangqiyin + * Added PUBSUB {CHANNELS, NUMPAT, NUMSUB} commands. Thanks Angus Pearson + * PubSub connections that encounter `InterruptedError`s now retry automatically. Thanks Carlton Gibson and Seth M. Larson - * LPUSH and RPUSH commands run on PyPy now correctly returns the number + * LPUSH and RPUSH commands run on PyPy now correctly return the number of items in the list. Thanks Jeong YunWon - * Added support to automatically retry socket EINTR errors. Thanks + * Added support to automatically retry socket EINTR errors. Thanks Thomas Steinacher - * PubSubWorker threads started with `run_in_thread` are now daemonized + * PubSubWorker threads started with `run_in_thread` are now daemonized so the thread shuts down when the running process goes away. Thanks Keith Ainsworth - * Added support for GEO commands. Thanks Pau Freixes, Alex DeBrie and + * Added support for GEO commands. Thanks Pau Freixes, Alex DeBrie and Abraham Toriz - * Made client construction from URLs smarter. Thanks Tim Savage - * Added support for CLUSTER * commands. Thanks Andy Huang - * The RESTORE command now accepts an optional `replace` boolean. + * Made client construction from URLs smarter. Thanks Tim Savage + * Added support for CLUSTER * commands. Thanks Andy Huang + * The RESTORE command now accepts an optional `replace` boolean. Thanks Yoshinari Takaoka - * Attempt to connect to a new Sentinel if a TimeoutError occurs. Thanks + * Attempt to connect to a new Sentinel if a TimeoutError occurs. Thanks Bo Lopker - * Fixed a bug in the client's `__getitem__` where a KeyError would be + * Fixed a bug in the client's `__getitem__` where a KeyError would be raised if the value returned by the server is an empty string. Thanks Javier Candeira. - * Socket timeouts when connecting to a server are now properly raised + * Socket timeouts when connecting to a server are now properly raised as TimeoutErrors. * 2.10.5 - * Allow URL encoded parameters in Redis URLs. Characters like a "/" can + * Allow URL encoded parameters in Redis URLs. Characters like a "/" can now be URL encoded and redis-py will correctly decode them. Thanks Paul Keene. - * Added support for the WAIT command. Thanks https://github.com/eshizhan - * Better shutdown support for the PubSub Worker Thread. It now properly + * Added support for the WAIT command. Thanks + * Better shutdown support for the PubSub Worker Thread. It now properly cleans up the connection, unsubscribes from any channels and patterns previously subscribed to and consumes any waiting messages on the socket. - * Added the ability to sleep for a brief period in the event of a + * Added the ability to sleep for a brief period in the event of a WatchError occurring. Thanks Joshua Harlow. - * Fixed a bug with pipeline error reporting when dealing with characters + * Fixed a bug with pipeline error reporting when dealing with characters in error messages that could not be encoded to the connection's character set. Thanks Hendrik Muhs.
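The PUBSUB introspection commands mentioned above map to client methods along these lines (a hedged sketch; the channel name is hypothetical):

``` python
r.pubsub_channels()             # PUBSUB CHANNELS: currently active channels
r.pubsub_numsub('my-channel')   # PUBSUB NUMSUB: subscriber counts per channel
r.pubsub_numpat()               # PUBSUB NUMPAT: count of pattern subscriptions
```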
- * Fixed a bug in Sentinel connections that would inadvertently connect + * Fixed a bug in Sentinel connections that would inadvertently connect to the master when the connection pool resets. Thanks - https://github.com/df3n5 - * Better timeout support in Pubsub get_message. Thanks Andy Isaacson. - * Fixed a bug with the HiredisParser that would cause the parser to + + * Better timeout support in Pubsub get_message. Thanks Andy Isaacson. + * Fixed a bug with the HiredisParser that would cause the parser to get stuck in an endless loop if a specific number of bytes were delivered from the socket. This fix also increases performance of parsing large responses from the Redis server. - * Added support for ZREVRANGEBYLEX. - * ConnectionErrors are now raised if Redis refuses a connection due to + * Added support for ZREVRANGEBYLEX. + * ConnectionErrors are now raised if Redis refuses a connection due to the maxclients limit being exceeded. Thanks Roman Karpovich. - * max_connections can now be set when instantiating client instances. + * max_connections can now be set when instantiating client instances. Thanks Ohad Perry. * 2.10.4 (skipped due to a PyPI snafu) * 2.10.3 - * Fixed a bug with the bytearray support introduced in 2.10.2. Thanks + * Fixed a bug with the bytearray support introduced in 2.10.2. Thanks Josh Owen. * 2.10.2 - * Added support for Hiredis's new bytearray support. Thanks - https://github.com/tzickel - * POSSIBLE BACKWARDS INCOMPATBLE CHANGE: Fixed a possible race condition + * Added support for Hiredis's new bytearray support. Thanks + + * POSSIBLE BACKWARDS INCOMPATIBLE CHANGE: Fixed a possible race condition when multiple threads share the same Lock instance with a timeout. Lock tokens are now stored in thread local storage by default. If you have code that acquires a lock in one thread and passes that lock instance to another thread to release it, you need to disable thread local storage. Refer to the docstrings on the Lock class for more information about the thread_local argument. - * Fixed a regression in from_url where "charset" and "errors" weren't + * Fixed a regression in from_url where "charset" and "errors" weren't valid options. "encoding" and "encoding_errors" are still accepted and preferred. - * The "charset" and "errors" options have been deprecated. Passing + * The "charset" and "errors" options have been deprecated. Passing either to StrictRedis.__init__ or from_url will still work but will also emit a DeprecationWarning. Instead use the "encoding" and "encoding_errors" options. - * Fixed a compatibility bug with Python 3 when the server closes a + * Fixed a compatibility bug with Python 3 when the server closes a connection. - * Added BITPOS command. Thanks https://github.com/jettify. - * Fixed a bug when attempting to send large values to Redis in a Pipeline. + * Added BITPOS command. Thanks . + * Fixed a bug when attempting to send large values to Redis in a Pipeline. * 2.10.1 - * Fixed a bug where Sentinel connections to a server that's no longer a + * Fixed a bug where Sentinel connections to a server that's no longer a master and receives a READONLY error will disconnect and reconnect to the master. * 2.10.0 - * Discontinued support for Python 2.5. Upgrade. You'll be happier. - * The HiRedis parser will now properly raise ConnectionErrors. - * Completely refactored PubSub support. Fixes all known PubSub bugs and + * Discontinued support for Python 2.5. Upgrade. You'll be happier. + * The HiRedis parser will now properly raise ConnectionErrors.
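The thread-local token behavior described above matters when one thread acquires a lock and another must release it; a minimal sketch, assuming `r` is a client and the key name is illustrative:

``` python
# Lock tokens live in thread-local storage by default. To hand a lock
# between threads, disable thread-local storage when creating it.
lock = r.lock('resource-name', timeout=10, thread_local=False)
if lock.acquire(blocking=True):
    # `lock` can now be handed to another thread, which is able to
    # call lock.release() because the token is not thread-local
    lock.release()
```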
+ * Completely refactored PubSub support. Fixes all known PubSub bugs and adds a bunch of new features. Docs can be found in the README under the new "Publish / Subscribe" section. - * Added the new HyperLogLog commands (PFADD, PFCOUNT, PFMERGE). Thanks + * Added the new HyperLogLog commands (PFADD, PFCOUNT, PFMERGE). Thanks Pepijn de Vos and Vincent Ohprecio. - * Updated TTL and PTTL commands with Redis 2.8+ semantics. Thanks Markus + * Updated TTL and PTTL commands with Redis 2.8+ semantics. Thanks Markus Kaiserswerth. - * *SCAN commands now return a long (int on Python3) cursor value rather + * *SCAN commands now return a long (int on Python3) cursor value rather than the string representation. This might be slightly backwards - incompatible in code using *SCAN commands loops such as + incompatible in code using *SCAN command loops such as "while cursor != '0':". - * Added extra *SCAN commands that return iterators instead of the normal + * Added extra *SCAN commands that return iterators instead of the normal [cursor, data] type. Use scan_iter, hscan_iter, sscan_iter, and zscan_iter for iterators. Thanks Mathieu Longtin. - * Added support for SLOWLOG commands. Thanks Rick van Hattem. - * Added lexicographical commands ZRANGEBYLEX, ZREMRANGEBYLEX, and ZLEXCOUNT + * Added support for SLOWLOG commands. Thanks Rick van Hattem. + * Added lexicographical commands ZRANGEBYLEX, ZREMRANGEBYLEX, and ZLEXCOUNT for sorted sets. - * Connection objects now support an optional argument, socket_read_size, + * Connection objects now support an optional argument, socket_read_size, indicating how much data to read during each socket.recv() call. After benchmarking, increased the default size to 64k, which dramatically improves performance when fetching large values, such as many results in a pipeline or a large (>1MB) string value. - * Improved the pack_command and send_packed_command functions to increase + * Improved the pack_command and send_packed_command functions to increase performance when sending large (>1MB) values. - * Sentinel Connections to master servers now detect when a READONLY error + * Sentinel Connections to master servers now detect when a READONLY error is encountered and disconnect themselves and all other active connections to the same master so that the new master can be discovered. - * Fixed Sentinel state parsing on Python 3. - * Added support for SENTINEL MONITOR, SENTINEL REMOVE, and SENTINEL SET + * Fixed Sentinel state parsing on Python 3. + * Added support for SENTINEL MONITOR, SENTINEL REMOVE, and SENTINEL SET commands. Thanks Greg Murphy. - * INFO ouput that doesn't follow the "key:value" format will now be + * INFO output that doesn't follow the "key:value" format will now be appended to a key named "__raw__" in the INFO dictionary. Thanks Pedro Larroy. - * The "vagrant" directory contains a complete vagrant environment for + * The "vagrant" directory contains a complete vagrant environment for redis-py developers. The environment runs a Redis master, a Redis slave, - and 3 Sentinels. Future iterations of the test sutie will incorporate + and 3 Sentinels. Future iterations of the test suite will incorporate more integration style tests, ensuring things like failover happen correctly. - * It's now possible to create connection pool instances from a URL. + * It's now possible to create connection pool instances from a URL. StrictRedis.from_url() now uses this feature to create a connection pool instance and use that when creating a new client instance.
Thanks - https://github.com/chillipino - * When creating client instances or connection pool instances from an URL, + + * When creating client instances or connection pool instances from an URL, it's now possible to pass additional options to the connection pool with querystring arguments. - * Fixed a bug where some encodings (like utf-16) were unusable on Python 3 + * Fixed a bug where some encodings (like utf-16) were unusable on Python 3 as command names and literals would get encoded. - * Added an SSLConnection class that allows for secure connections through - stunnel or other means. Construct and SSL connection with the sll=True + * Added an SSLConnection class that allows for secure connections through + stunnel or other means. Construct an SSL connection with the ssl=True option on client classes, using the rediss:// scheme from an URL, or by passing the SSLConnection class to a connection pool's - connection_class argument. Thanks https://github.com/oranagra. - * Added a socket_connect_timeout option to control how long to wait while + connection_class argument. Thanks . + * Added a socket_connect_timeout option to control how long to wait while establishing a TCP connection before timing out. This lets the client fail fast when attempting to connect to a downed server while keeping a more lenient timeout for all other socket operations. - * Added TCP Keep-alive support by passing use the + * Added TCP Keep-alive support by passing the socket_keepalive=True option. Finer grain control can be achieved using the socket_keepalive_options option which expects a dictionary with any of the keys (socket.TCP_KEEPIDLE, socket.TCP_KEEPCNT, socket.TCP_KEEPINTVL) and integers for values. Thanks Yossi Gottlieb. - * Added a `retry_on_timeout` option that controls how socket.timeout errors + * Added a `retry_on_timeout` option that controls how socket.timeout errors are handled. By default it is set to False and will cause the client to raise a TimeoutError anytime a socket.timeout is encountered. If `retry_on_timeout` is set to True, the client will retry a command that timed out once like other `socket.error`s. - * Completely refactored the Lock system. There is now a LuaLock class + * Completely refactored the Lock system. There is now a LuaLock class that's used when the Redis server is capable of running Lua scripts along with a fallback class for Redis servers < 2.6. The new locks fix several subtle race conditions that the old lock could face. In addition, a new method, "extend", is available on lock instances that allows a lock owner to extend the amount of time they have the lock for. Thanks to - Eli Finkelshteyn and https://github.com/chillipino for contributions. + Eli Finkelshteyn and for contributions. * 2.9.1 - * IPv6 support. Thanks https://github.com/amashinchi + * IPv6 support. Thanks * 2.9.0 - * Performance improvement for packing commands when using the PythonParser. + * Performance improvement for packing commands when using the PythonParser. Thanks Guillaume Viot. - * Executing an empty pipeline transaction no longer sends MULTI/EXEC to + * Executing an empty pipeline transaction no longer sends MULTI/EXEC to the server. Thanks EliFinkelshteyn. - * Errors when authenticating (incorrect password) and selecting a database + * Errors when authenticating (incorrect password) and selecting a database now close the socket. - * Full Sentinel support thanks to Vitja Makarov. Thanks! - * Better repr support for client and connection pool instances.
Thanks + * Full Sentinel support thanks to Vitja Makarov. Thanks! + * Better repr support for client and connection pool instances. Thanks Mark Roberts. - * Error messages that the server sends to the client are now included + * Error messages that the server sends to the client are now included in the client error message. Thanks Sangjin Lim. - * Added the SCAN, SSCAN, HSCAN, and ZSCAN commands. Thanks Jingchao Hu. - * ResponseErrors generated by pipeline execution provide addition context + * Added the SCAN, SSCAN, HSCAN, and ZSCAN commands. Thanks Jingchao Hu. + * ResponseErrors generated by pipeline execution provide additional context including the position of the command in the pipeline and the actual command text that generated the error. - * ConnectionPools now play nicer in threaded environments that fork. Thanks + * ConnectionPools now play nicer in threaded environments that fork. Thanks Christian Joergensen. * 2.8.0 - * redis-py should play better with gevent when a gevent Timeout is raised. + * redis-py should play better with gevent when a gevent Timeout is raised. Thanks leifkb. - * Added SENTINEL command. Thanks Anna Janackova. - * Fixed a bug where pipelines could potentially corrupt a connection + * Added SENTINEL command. Thanks Anna Janackova. + * Fixed a bug where pipelines could potentially corrupt a connection if the MULTI command generated a ResponseError. Thanks EliFinkelshteyn for the report. - * Connections now call socket.shutdown() prior to socket.close() to + * Connections now call socket.shutdown() prior to socket.close() to ensure communication ends immediately per the note at - https://docs.python.org/2/library/socket.html#socket.socket.close + Thanks to David Martin for pointing this out. - * Lock checks are now based on floats rather than ints. Thanks + * Lock checks are now based on floats rather than ints. Thanks Vitja Makarov. * 2.7.6 - * Added CONFIG RESETSTAT command. Thanks Yossi Gottlieb. - * Fixed a bug introduced in 2.7.3 that caused issues with script objects + * Added CONFIG RESETSTAT command. Thanks Yossi Gottlieb. + * Fixed a bug introduced in 2.7.3 that caused issues with script objects and pipelines. Thanks Carpentier Pierre-Francois. - * Converted redis-py's test suite to use the awesome py.test library. - * Fixed a bug introduced in 2.7.5 that prevented a ConnectionError from + * Converted redis-py's test suite to use the awesome py.test library. + * Fixed a bug introduced in 2.7.5 that prevented a ConnectionError from being raised when the Redis server is LOADING data. - * Added a BusyLoadingError exception that's raised when the Redis server + * Added a BusyLoadingError exception that's raised when the Redis server is starting up and not accepting commands yet. BusyLoadingError subclasses ConnectionError, which this state previously returned. Thanks Yossi Gottlieb. * 2.7.5 - * DEL, HDEL and ZREM commands now return the numbers of keys deleted + * DEL, HDEL and ZREM commands now return the numbers of keys deleted instead of just True/False. - * from_url now supports URIs with a port number. Thanks Aaron Westendorf. + * from_url now supports URIs with a port number. Thanks Aaron Westendorf. * 2.7.4 - * Added missing INCRBY method. Thanks Krzysztof Dorosz. - * SET now accepts the EX, PX, NX and XX options from Redis 2.6.12. These + * Added missing INCRBY method. Thanks Krzysztof Dorosz. + * SET now accepts the EX, PX, NX and XX options from Redis 2.6.12. These options will generate errors if used when connected to a Redis server < 2.6.12. Thanks George Yoshida.
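Sketching the SET options mentioned above (key and values are illustrative; `r` is an assumed client):

``` python
r.set('token', 'abc123', ex=60)     # EX: expire in 60 seconds
r.set('token', 'abc123', px=1500)   # PX: expire in 1500 milliseconds
r.set('token', 'fresh', nx=True)    # NX: set only if the key does not exist
r.set('token', 'newer', xx=True)    # XX: set only if the key already exists
```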
* 2.7.3 - * Fixed a bug with BRPOPLPUSH and lists with empty strings. - * All empty except: clauses have been replaced to only catch Exception + * Fixed a bug with BRPOPLPUSH and lists with empty strings. + * All empty except: clauses have been replaced to only catch Exception subclasses. This prevents a KeyboardInterrupt from triggering exception handlers. Thanks Lucian Branescu Mihaila. - * All exceptions that are the result of redis server errors now share a + * All exceptions that are the result of redis server errors now share a common Exception subclass, ServerError. Thanks Matt Robenolt. - * Prevent DISCARD from being called if MULTI wasn't also called. Thanks + * Prevent DISCARD from being called if MULTI wasn't also called. Thanks Pete Aykroyd. - * SREM now returns an integer indicating the number of items removed from - the set. Thanks https://github.com/ronniekk. - * Fixed a bug with BGSAVE and BGREWRITEAOF response callbacks with Python3. + * SREM now returns an integer indicating the number of items removed from + the set. Thanks . + * Fixed a bug with BGSAVE and BGREWRITEAOF response callbacks with Python3. Thanks Nathan Wan. - * Added CLIENT GETNAME and CLIENT SETNAME commands. - Thanks https://github.com/bitterb. - * It's now possible to use len() on a pipeline instance to determine the + * Added CLIENT GETNAME and CLIENT SETNAME commands. + Thanks . + * It's now possible to use len() on a pipeline instance to determine the number of commands that will be executed. Thanks Jon Parise. - * Fixed a bug in INFO's parse routine with floating point numbers. Thanks + * Fixed a bug in INFO's parse routine with floating point numbers. Thanks Ali Onur Uyar. - * Fixed a bug with BITCOUNT to allow `start` and `end` to both be zero. + * Fixed a bug with BITCOUNT to allow `start` and `end` to both be zero. Thanks Tim Bart. - * The transaction() method now accepts a boolean keyword argument, + * The transaction() method now accepts a boolean keyword argument, value_from_callable. By default, or if False is passed, the transaction() method will return the value of the pipeline's execution. Otherwise, it will return whatever func() returns. - * Python3 compatibility fix ensuring we're not already bytes(). Thanks + * Python3 compatibility fix ensuring we're not already bytes(). Thanks Salimane Adjao Moustapha. - * Added PSETEX. Thanks YAMAMOTO Takashi. - * Added a BlockingConnectionPool to limit the number of connections that + * Added PSETEX. Thanks YAMAMOTO Takashi. + * Added a BlockingConnectionPool to limit the number of connections that can be created. Thanks James Arthur. - * SORT now accepts a `groups` option that if specified, will return + * SORT now accepts a `groups` option that, if specified, will return tuples of n-length, where n is the number of keys specified in the GET argument. This allows for convenient row-based iteration. Thanks Ionuț Arțăriși. * 2.7.2 - * Parse errors are now *always* raised on multi/exec pipelines, regardless + * Parse errors are now *always* raised on multi/exec pipelines, regardless of the `raise_on_error` flag. See - https://groups.google.com/forum/?hl=en&fromgroups=#!topic/redis-db/VUiEFT8U8U0 + for more info. * 2.7.1 - * Packaged tests with source code + * Packaged tests with source code * 2.7.0 - * Added BITOP and BITCOUNT commands. Thanks Mark Tozzi. - * Added the TIME command. Thanks Jason Knight.
- * Added support for LUA scripting. Thanks to Angus Peart, Drew Smathers, + * Added BITOP and BITCOUNT commands. Thanks Mark Tozzi. + * Added the TIME command. Thanks Jason Knight. + * Added support for LUA scripting. Thanks to Angus Peart, Drew Smathers, Issac Kelly, Louis-Philippe Perron, Sean Bleier, Jeffrey Kaditz, and Dvir Volk for various patches and contributions to this feature. - * Changed the default error handling in pipelines. By default, the first + * Changed the default error handling in pipelines. By default, the first error in a pipeline will now be raised. A new parameter to the pipeline's execute, `raise_on_error`, can be set to False to keep the old behavior of embedding the exception instances in the result. - * Fixed a bug with pipelines where parse errors won't corrupt the + * Fixed a bug with pipelines where parse errors won't corrupt the socket. - * Added the optional `number` argument to SRANDMEMBER for use with + * Added the optional `number` argument to SRANDMEMBER for use with Redis 2.6+ servers. - * Added PEXPIRE/PEXPIREAT/PTTL commands. Thanks Luper Rouch. - * Added INCRBYFLOAT/HINCRBYFLOAT commands. Thanks Nikita Uvarov. - * High precision floating point values won't lose their precision when + * Added PEXPIRE/PEXPIREAT/PTTL commands. Thanks Luper Rouch. + * Added INCRBYFLOAT/HINCRBYFLOAT commands. Thanks Nikita Uvarov. + * High precision floating point values won't lose their precision when being sent to the Redis server. Thanks Jason Oster and Oleg Pudeyev. - * Added CLIENT LIST/CLIENT KILL commands + * Added CLIENT LIST/CLIENT KILL commands * 2.6.2 - * `from_url` is now available as a classmethod on client classes. Thanks + * `from_url` is now available as a classmethod on client classes. Thanks Jon Parise for the patch. - * Fixed several encoding errors resulting from the Python 3.x support. + * Fixed several encoding errors resulting from the Python 3.x support. * 2.6.1 - * Python 3.x support! Big thanks to Alex Grönholm. - * Fixed a bug in the PythonParser's read_response that could hide an error + * Python 3.x support! Big thanks to Alex Grönholm. + * Fixed a bug in the PythonParser's read_response that could hide an error from the client (#251). * 2.6.0 - * Changed (p)subscribe and (p)unsubscribe to no longer return messages + * Changed (p)subscribe and (p)unsubscribe to no longer return messages indicating the channel was subscribed/unsubscribed to. These messages are available in the listen() loop instead. This is to prevent the following scenario: - * Client A is subscribed to "foo" - * Client B publishes message to "foo" - * Client A subscribes to channel "bar" at the same time. + * Client A is subscribed to "foo" + * Client B publishes message to "foo" + * Client A subscribes to channel "bar" at the same time. Prior to this change, the subscribe() call would return the published messages on "foo" rather than the subscription confirmation to "bar". - * Added support for GETRANGE, thanks Jean-Philippe Caruana - * A new setting "decode_responses" specifies whether return values from + * Added support for GETRANGE, thanks Jean-Philippe Caruana + * A new setting "decode_responses" specifies whether return values from Redis commands get decoded automatically using the client's charset value. Thanks to Frankie Dintino for the patch. * 2.4.13 - * redis.from_url() can take an URL representing a Redis connection string + * redis.from_url() can take an URL representing a Redis connection string and return a client object. Thanks Kenneth Reitz for the patch.
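As a quick sketch of the URL-based construction referenced above (connection details are illustrative):

``` python
import redis

r = redis.from_url('redis://localhost:6379/0')
# later releases also expose this as a classmethod on client classes:
r2 = redis.Redis.from_url('redis://localhost:6379/0')
```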
* 2.4.12 - * ConnectionPool is now fork-safe. Thanks Josiah Carson for the patch. + * ConnectionPool is now fork-safe. Thanks Josiah Carlson for the patch. * 2.4.11 - * AuthenticationError will now be correctly raised if an invalid password + * AuthenticationError will now be correctly raised if an invalid password is supplied. - * If Hiredis is unavailable, the HiredisParser will raise a RedisError + * If Hiredis is unavailable, the HiredisParser will raise a RedisError if selected manually. - * Made the INFO command more tolerant of Redis changes formatting. Fix + * Made the INFO command more tolerant of Redis formatting changes. Fix for #217. * 2.4.10 - * Buffer reads from socket in the PythonParser. Fix for a Windows-specific + * Buffer reads from socket in the PythonParser. Fix for a Windows-specific bug (#205). - * Added the OBJECT and DEBUG OBJECT commands. - * Added __del__ methods for classes that hold on to resources that need to + * Added the OBJECT and DEBUG OBJECT commands. + * Added __del__ methods for classes that hold on to resources that need to be cleaned up. This should prevent resource leakage when these objects leave scope due to misuse or unhandled exceptions. Thanks David Wolever for the suggestion. - * Added the ECHO command for completeness. - * Fixed a bug where attempting to subscribe to a PubSub channel of a Redis + * Added the ECHO command for completeness. + * Fixed a bug where attempting to subscribe to a PubSub channel of a Redis server that's down would blow out the stack. Fixes #179 and #195. Thanks Ovidiu Predescu for the test case. - * StrictRedis's TTL command now returns a -1 when querying a key with no + * StrictRedis's TTL command now returns a -1 when querying a key with no expiration. The Redis class continues to return None. - * ZADD and SADD now return integer values indicating the number of items + * ZADD and SADD now return integer values indicating the number of items added. Thanks Homer Strong. - * Renamed the base client class to StrictRedis, replacing ZADD and LREM in + * Renamed the base client class to StrictRedis, replacing ZADD and LREM in favor of their official argument order. The Redis class is now a subclass of StrictRedis, implementing the legacy redis-py implementations of ZADD and LREM. Docs have been updated to suggest the use of StrictRedis. - * SETEX in StrictRedis is now compliant with official Redis SETEX command. + * SETEX in StrictRedis is now compliant with official Redis SETEX command. The name, value, time implementation moved to "Redis" for backwards compatibility. * 2.4.9 - * Removed socket retry logic in Connection. This is the responsibility of + * Removed socket retry logic in Connection. It is the responsibility of the caller to determine if the command is safe and can be retried. Thanks David Wolever. - * Added some extra guards around various types of exceptions being raised + * Added some extra guards around various types of exceptions being raised when sending or parsing data. Thanks David Wolever and Denis Bilenko. * 2.4.8 - * Imported with_statement from __future__ for Python 2.5 compatibility. + * Imported with_statement from __future__ for Python 2.5 compatibility. * 2.4.7 - * Fixed a bug where some connections were not getting released back to the + * Fixed a bug where some connections were not getting released back to the connection pool after pipeline execution. - * Pipelines can now be used as context managers.
This is the preferred way + * Pipelines can now be used as context managers. This is the preferred way to use them, ensuring that connections get cleaned up properly. Thanks David Wolever. - * Added a convenience method called transaction() on the base Redis class. + * Added a convenience method called transaction() on the base Redis class. This method eliminates much of the boilerplate used when using pipelines to watch Redis keys. See the documentation for details on usage. * 2.4.6 - * Variadic arguments for SADD, SREM, ZREN, HDEL, LPUSH, and RPUSH. Thanks + * Variadic arguments for SADD, SREM, ZREM, HDEL, LPUSH, and RPUSH. Thanks Raphaël Vinot. - * (CRITICAL) Fixed an error in the Hiredis parser that occasionally caused + * (CRITICAL) Fixed an error in the Hiredis parser that occasionally caused the socket connection to become corrupted and unusable. This became noticeable once connection pools started to be used. - * ZRANGE, ZREVRANGE, ZRANGEBYSCORE, and ZREVRANGEBYSCORE now take an + * ZRANGE, ZREVRANGE, ZRANGEBYSCORE, and ZREVRANGEBYSCORE now take an additional optional argument, score_cast_func, which is a callable used to cast the score value in the return type. The default is float. - * Removed the PUBLISH method from the PubSub class. Connections that are + * Removed the PUBLISH method from the PubSub class. Connections that are [P]SUBSCRIBEd cannot issue PUBLISH commands, so it doesn't make sense to have it here. - * Pipelines now contain WATCH and UNWATCH. Calling WATCH or UNWATCH from + * Pipelines now contain WATCH and UNWATCH. Calling WATCH or UNWATCH from the base client class will result in a deprecation warning. After WATCHing one or more keys, the pipeline will be placed in immediate execution mode until UNWATCH or MULTI are called. Refer to the new pipeline docs in the README for more information. Thanks to David Wolever and Randall Leeds for greatly helping with this. * 2.4.5 - * The PythonParser now works better when reading zero length strings. + * The PythonParser now works better when reading zero length strings. * 2.4.4 - * Fixed a typo introduced in 2.4.3 + * Fixed a typo introduced in 2.4.3 * 2.4.3 - * Fixed a bug in the UnixDomainSocketConnection caused when trying to + * Fixed a bug in the UnixDomainSocketConnection caused when trying to form an error message after a socket error. * 2.4.2 - * Fixed a bug in pipeline that caused an exception while trying to + * Fixed a bug in pipeline that caused an exception while trying to reconnect after a connection timeout. * 2.4.1 - * Fixed a bug in the PythonParser if disconnect is called before connect. + * Fixed a bug in the PythonParser if disconnect is called before connect. * 2.4.0 - * WARNING: 2.4 contains several backwards incompatible changes. - * Completely refactored Connection objects. Moved much of the Redis + * WARNING: 2.4 contains several backwards incompatible changes. + * Completely refactored Connection objects. Moved much of the Redis protocol packing for requests here, and eliminated the nasty dependencies it had on the client to do AUTH and SELECT commands on connect. - * Connection objects now have a parser attribute. Parsers are responsible + * Connection objects now have a parser attribute. Parsers are responsible for reading data Redis sends. Two parsers ship with redis-py: a PythonParser and the HiRedis parser. redis-py will automatically use the HiRedis parser if you have the Python hiredis module installed, otherwise it will fall back to the PythonParser. You can force one or the other, or even an external one by passing the `parser_class` argument to ConnectionPool.
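The WATCH/MULTI pipeline flow described above is typically written like this (a sketch; `r` is an assumed client and 'counter' a hypothetical key):

``` python
import redis

with r.pipeline() as pipe:
    while True:
        try:
            pipe.watch('counter')             # immediate execution mode
            current = int(pipe.get('counter') or 0)
            pipe.multi()                      # back to buffered mode
            pipe.set('counter', current + 1)
            pipe.execute()
            break
        except redis.WatchError:
            continue                          # watched key changed; retry
```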
- * Added a UnixDomainSocketConnection for users wanting to talk to the Redis + * Added a UnixDomainSocketConnection for users wanting to talk to the Redis instance running on a local machine only. You can use this connection by passing it to the `connection_class` argument of the ConnectionPool. - * Connections no longer derive from threading.local. See threading.local + * Connections no longer derive from threading.local. See threading.local note below. - * ConnectionPool has been completely refactored. The ConnectionPool now + * ConnectionPool has been completely refactored. The ConnectionPool now maintains a list of connections. The redis-py client only hangs on to a ConnectionPool instance, calling get_connection() anytime it needs to send a command. When get_connection() is called, the command name and @@ -726,83 +729,84 @@ belong to and return a connection to it. ConnectionPool also implements disconnect() to force all connections in the pool to disconnect from the Redis server. - * redis-py no longer support the SELECT command. You can still connect to + * redis-py no longer supports the SELECT command. You can still connect to a specific database by specifying it when instantiating a client instance or by creating a connection pool. If you need to talk to multiple databases within your application, you should use a separate client instance for each database you want to talk to. - * Completely refactored Publish/Subscribe support. The subscribe and listen + * Completely refactored Publish/Subscribe support. The subscribe and listen commands are no longer available on the redis-py Client class. Instead, the `pubsub` method returns an instance of the PubSub class which contains all publish/subscribe support. Note, you can still PUBLISH from the redis-py client class if you desire. - * Removed support for all previously deprecated commands or options. - * redis-py no longer uses threading.local in any way. Since the Client + * Removed support for all previously deprecated commands or options. + * redis-py no longer uses threading.local in any way. Since the Client class no longer holds on to a connection, it's no longer needed. You can now pass client instances between threads, and commands run on those threads will retrieve an available connection from the pool, use it and release it. It should now be trivial to use redis-py with eventlet or greenlet. - * ZADD now accepts pairs of value=score keyword arguments. This should help + * ZADD now accepts pairs of value=score keyword arguments. This should help resolve the long standing #72. The older value and score arguments have been deprecated in favor of the keyword argument style. - * Client instances now get their own copy of RESPONSE_CALLBACKS. The new + * Client instances now get their own copy of RESPONSE_CALLBACKS. The new set_response_callback method adds a user defined callback to the instance. - * Support Jython, fixing #97. Thanks to Adam Vandenberg for the patch. - * Using __getitem__ now properly raises a KeyError when the key is not + * Support Jython, fixing #97. Thanks to Adam Vandenberg for the patch. + * Using __getitem__ now properly raises a KeyError when the key is not found. Thanks Ionuț Arțăriși for the patch.
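With SELECT gone, as described above, the pattern is one client instance per logical database (a sketch with hypothetical names):

``` python
import redis

cache = redis.Redis(host='localhost', port=6379, db=0)
sessions = redis.Redis(host='localhost', port=6379, db=1)
```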
- * Newer Redis versions return a LOADING message for some commands while + * Newer Redis versions return a LOADING message for some commands while the database is loading from disk during server start. This could cause problems with SELECT. We now force a socket disconnection prior to raising a ResponseError so subsequent connections have to reconnect and re-select the appropriate database. Thanks to Benjamin Anderson for finding this and fixing. * 2.2.4 - * WARNING: Potential backwards incompatible change - Changed order of + * WARNING: Potential backwards incompatible change - Changed order of parameters of ZREVRANGEBYSCORE to match those of the actual Redis command. This is only backwards-incompatible if you were passing max and min via keyword args. If passing by normal args, nothing in user code should have to change. Thanks Stéphane Angel for the fix. - * Fixed INFO to properly parse the Redis data correctly for both 2.2.x and + * Fixed INFO to properly parse the Redis data correctly for both 2.2.x and 2.3+. Thanks Stéphane Angel for the fix. - * Lock objects now store their timeout value as a float. This allows floats + * Lock objects now store their timeout value as a float. This allows floats to be used as timeout values. No changes to existing code required. - * WATCH now supports multiple keys. Thanks Rich Schumacher. - * Broke out some code that was Python 2.4 incompatible. redis-py should + * WATCH now supports multiple keys. Thanks Rich Schumacher. + * Broke out some code that was Python 2.4 incompatible. redis-py should now be usable on 2.4, but this hasn't actually been tested. Thanks Dan Colish for the patch. - * Optimized some code using izip and islice. Should have a pretty good + * Optimized some code using izip and islice. Should have a pretty good speed up on larger data sets. Thanks Dan Colish. - * Better error handling when submitting an empty mapping to HMSET. Thanks + * Better error handling when submitting an empty mapping to HMSET. Thanks Dan Colish. - * Subscription status is now reset after every (re)connection. + * Subscription status is now reset after every (re)connection. * 2.2.3 - * Added support for Hiredis. To use, simply "pip install hiredis" or + * Added support for Hiredis. To use, simply "pip install hiredis" or "easy_install hiredis". Thanks to Pieter Noordhuis for the hiredis-py bindings and the patch to redis-py. - * The connection class is chosen based on whether hiredis is installed + * The connection class is chosen based on whether hiredis is installed or not. To force the use of the PythonConnection, simply create your own ConnectionPool instance with the connection_class argument assigned to the PythonConnection class. - * Added missing command ZREVRANGEBYSCORE. Thanks Jay Baird for the patch. - * The INFO command should be parsed correctly on 2.2.x server versions + * Added missing command ZREVRANGEBYSCORE. Thanks Jay Baird for the patch. + * The INFO command should be parsed correctly on 2.2.x server versions and is backwards compatible with older versions. Thanks Brett Hoerner. * 2.2.2 - * Fixed a bug in ZREVRANK where retrieving the rank of a value not in + * Fixed a bug in ZREVRANK where retrieving the rank of a value not in the zset would raise an error. - * Fixed a bug in Connection.send where the errno import was getting + * Fixed a bug in Connection.send where the errno import was getting overwritten by a local variable. - * Fixed a bug in SLAVEOF when promoting an existing slave to a master.
- * Reverted change of download URL back to redis-VERSION.tar.gz. 2.2.1's + * Fixed a bug in SLAVEOF when promoting an existing slave to a master. + * Reverted change of download URL back to redis-VERSION.tar.gz. 2.2.1's change of this actually broke Pypi for Pip installs. Sorry! * 2.2.1 - * Changed archive name to redis-py-VERSION.tar.gz to not conflict + * Changed archive name to redis-py-VERSION.tar.gz to not conflict with the Redis server archive. * 2.2.0 - * Implemented SLAVEOF - * Implemented CONFIG as config_get and config_set - * Implemented GETBIT/SETBIT - * Implemented BRPOPLPUSH - * Implemented STRLEN - * Implemented PERSIST - * Implemented SETRANGE + * Implemented SLAVEOF + * Implemented CONFIG as config_get and config_set + * Implemented GETBIT/SETBIT + * Implemented BRPOPLPUSH + * Implemented STRLEN + * Implemented PERSIST + * Implemented SETRANGE + * Changed type annotation of the `num` parameter in `zrange` from `int` to `Optional[int]` \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000000..f9b53ac4b6 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,173 @@ +# Contributing + +## Introduction + +We appreciate your interest in considering contributing to redis-py. +Community contributions mean a lot to us. + +## Contributions we need + +You may already know how you'd like to contribute, whether it's a fix for a bug you +encountered, or a new feature your team wants to use. + +If you don't know where to start, consider improving +documentation, bug triaging, or writing tutorials; these are all examples of +helpful contributions that mean less work for you. + +## Your First Contribution + +Unsure where to begin contributing? You can start by looking through +[help-wanted +issues](https://github.com/andymccurdy/redis-py/issues?q=is%3Aopen+is%3Aissue+label%3ahelp-wanted). + +Never contributed to open source before? Here are a couple of friendly +tutorials: + +- +- + +## Getting Started + +Here's how to get started with your code contribution: + +1. Create your own fork of redis-py +2. Do the changes in your fork +3. Create a virtualenv and install the development dependencies from the dev_requirements.txt file: + ``` + python -m venv .venv + source .venv/bin/activate + pip install -r dev_requirements.txt + pip install -e .[jwt] + ``` + +4. If you need a development environment, run `invoke devenv`. Note: this relies on docker-compose to build environments, and assumes that you have a version supporting [docker profiles](https://docs.docker.com/compose/profiles/). +5. While developing, make sure the tests pass by running `invoke tests` +6. If you like the change and think the project could use it, send a + pull request + +To see what else is part of the automation, run `invoke -l` + +## The Development Environment + +Running `invoke devenv` installs the development dependencies specified +in the dev_requirements.txt. It starts all of the dockers used by this +project, and leaves them running. These can be easily cleaned up with +`invoke clean`. NOTE: it is assumed that the user running these tests +can execute docker and its various commands. + +- A master Redis node +- A Redis replica node +- Three sentinel Redis nodes +- A redis cluster +- An stunnel docker fronting the master Redis node + +The replica node is a replica of the master node, using the +[leader-follower replication](https://redis.io/topics/replication) +feature.
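As a sketch, a typical session with this environment might look like the following, using the invoke targets described in this guide (Docker is assumed to be available):

``` bash
invoke devenv    # start master, replica, sentinels, cluster and stunnel
invoke tests     # run the test suite against the running containers
invoke clean     # tear the environment down when finished
```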
+ +The sentinels monitor the master node in a [sentinel high-availability +configuration](https://redis.io/topics/sentinel). + +## Testing + +Call `invoke tests` to run all tests, or `invoke all-tests` to run the linters +as well as the tests. With the 'tests' and 'all-tests' targets, all Redis and +RedisCluster tests will be run. + +It is possible to run only Redis client tests (with cluster mode disabled) by +using `invoke standalone-tests`; similarly, RedisCluster tests can be run by using +`invoke cluster-tests`. + +Each run of tests starts and stops the various dockers required. Sometimes +things get stuck; an `invoke clean` can help. + +## Linting + +Call `invoke linters` to run linters without also running tests. + +## Documentation + +If relevant, update the code documentation, via docstrings, or in `/docs`. + +You can check how the documentation looks locally by running `invoke build-docs` +and loading the generated HTML files in a browser. + +Historically there is a mix of styles in the docstrings, but the preferred way +of documenting code is by applying the +[Google style](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html). +Type hints should be added according to PEP484, and should not be repeated in +the docstrings. + +### Docker Tips + +Following are a few tips that can help you work with the Docker-based +development environment. + +To get a bash shell inside of a container: + +`$ docker run -it <image> /bin/bash` + +Containers run a minimal Debian image that probably lacks tools you want +to use. To install packages, first get a bash session (see previous tip) +and then run: + +`$ apt update && apt install <package>` + +You can see the logging output of a container like this: + +`$ docker logs -f <container>` + +### Troubleshooting + +If you get any errors when running `make dev` or `make test`, make sure +that you are using supported versions of Docker. + +Please try at least the following versions of Docker: + +- Docker 19.03.12 + +## How to Report a Bug + +### Security Vulnerabilities + +**NOTE**: If you find a security vulnerability, do NOT open an issue. +Email [Redis Open Source](mailto:oss@redis.com) instead. + +In order to determine whether you are dealing with a security issue, ask +yourself these two questions: + +- Can I access something that's not mine, or something I shouldn't + have access to? +- Can I disable something for other people? + +If the answer to either of those two questions is *yes*, then you're +probably dealing with a security issue. Note that even if you answer +*no* to both questions, you may still be dealing with a security +issue, so if you're unsure, just email [us](mailto:oss@redis.com). + +### Everything Else + +When filing an issue, make sure to answer these five questions: + +1. What version of redis-py are you using? +2. What version of redis are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +## Suggest a feature or enhancement + +If you'd like to contribute a new feature, make sure you check our +issue list to see if someone has already proposed it. Work may already +be underway on the feature you want or we may have rejected a +feature like it already. + +If you don't see anything, open a new issue that describes the feature +you would like and how it should work. + +## Code review process + +The core team regularly looks at pull requests. We will provide +feedback as soon as possible. After receiving our feedback, please respond +within two weeks.
After that time, we may close your PR if it isn't +showing any activity. diff --git a/INSTALL b/INSTALL deleted file mode 100644 index 951f7dea8a..0000000000 --- a/INSTALL +++ /dev/null @@ -1,6 +0,0 @@ - -Please use - python setup.py install - -and report errors to Andy McCurdy (sedrik@gmail.com) - diff --git a/LICENSE b/LICENSE index 29a3fe3845..8509ccd678 100644 --- a/LICENSE +++ b/LICENSE @@ -1,22 +1,21 @@ -Copyright (c) 2012 Andy McCurdy +MIT License - Permission is hereby granted, free of charge, to any person - obtaining a copy of this software and associated documentation - files (the "Software"), to deal in the Software without - restriction, including without limitation the rights to use, - copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the - Software is furnished to do so, subject to the following - conditions: +Copyright (c) 2022-2023, Redis, inc. - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES - OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT - HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - OTHER DEALINGS IN THE SOFTWARE. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index 7aaee12a1d..0000000000 --- a/MANIFEST.in +++ /dev/null @@ -1,7 +0,0 @@ -include CHANGES -include INSTALL -include LICENSE -include README.rst -exclude __pycache__ -recursive-include tests * -recursive-exclude tests *.pyc diff --git a/README.md b/README.md new file mode 100644 index 0000000000..614f79f592 --- /dev/null +++ b/README.md @@ -0,0 +1,223 @@ +# redis-py + +The Python interface to the Redis key-value store. 
+ +[![CI](https://github.com/redis/redis-py/workflows/CI/badge.svg?branch=master)](https://github.com/redis/redis-py/actions?query=workflow%3ACI+branch%3Amaster) +[![docs](https://readthedocs.org/projects/redis/badge/?version=stable&style=flat)](https://redis.readthedocs.io/en/stable/) +[![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](./LICENSE) +[![pypi](https://badge.fury.io/py/redis.svg)](https://pypi.org/project/redis/) +[![pre-release](https://img.shields.io/github/v/release/redis/redis-py?include_prereleases&label=latest-prerelease)](https://github.com/redis/redis-py/releases) +[![codecov](https://codecov.io/gh/redis/redis-py/branch/master/graph/badge.svg?token=yenl5fzxxr)](https://codecov.io/gh/redis/redis-py) + +[Installation](#installation) | [Usage](#usage) | [Advanced Topics](#advanced-topics) | [Contributing](https://github.com/redis/redis-py/blob/master/CONTRIBUTING.md) + +--------------------------------------------- + +**Note:** redis-py 5.0 is the last version of redis-py that supports Python 3.7, as it has reached [end of life](https://devguide.python.org/versions/). redis-py 5.1 supports Python 3.8+.
+**Note:** redis-py 6.1.0 is the last version of redis-py that supports Python 3.8, as it has reached [end of life](https://devguide.python.org/versions/). redis-py 6.2.0 supports Python 3.9+. + +--------------------------------------------- + +## How do I Redis? + +[Learn for free at Redis University](https://redis.io/learn/university) + +[Try the Redis Cloud](https://redis.io/try-free/) + +[Dive into developer tutorials](https://redis.io/learn) + +[Join the Redis community](https://redis.io/community/) + +[Work at Redis](https://redis.io/careers/) + +## Installation + +Start a Redis server via Docker (for Redis versions >= 8.0): + +``` bash +docker run -p 6379:6379 -it redis:latest +``` + +Start a Redis server via Docker (for Redis versions < 8.0): + +``` bash +docker run -p 6379:6379 -it redis/redis-stack:latest +``` +To install redis-py, simply: + +``` bash +$ pip install redis +``` + +For faster performance, install redis with hiredis support; this provides a compiled response parser and, *for most cases*, requires zero code changes. +By default, if hiredis >= 1.0 is available, redis-py will attempt to use it for response parsing. + +``` bash
$ pip install "redis[hiredis]" +``` + +Looking for a high-level library to handle object mapping? See [redis-om-python](https://github.com/redis/redis-om-python)! + +## Supported Redis Versions + +The most recent version of this library supports Redis version [7.2](https://github.com/redis/redis/blob/7.2/00-RELEASENOTES), [7.4](https://github.com/redis/redis/blob/7.4/00-RELEASENOTES), [8.0](https://github.com/redis/redis/blob/8.0/00-RELEASENOTES) and [8.2](https://github.com/redis/redis/blob/8.2/00-RELEASENOTES). + +The table below highlights version compatibility of the most-recent library versions and redis versions. + +| Library version | Supported redis versions | |-----------------|-------------------| | 3.5.3 | <= 6.2 Family of releases | | >= 4.5.0 | Version 5.0 to 7.0 | | >= 5.0.0 | Version 5.0 to 7.4 | | >= 6.0.0 | Version 7.2 to current | + + +## Usage + +### Basic Example + +``` python
>>> import redis
>>> r = redis.Redis(host='localhost', port=6379, db=0)
>>> r.set('foo', 'bar')
True
>>> r.get('foo')
b'bar' +``` + +The above code connects to localhost on port 6379, sets a value in Redis, and retrieves it. All responses are returned as bytes in Python. To receive decoded strings, set *decode_responses=True*. For this, and more connection options, see [these examples](https://redis.readthedocs.io/en/stable/examples.html). + + +#### RESP3 Support
To enable support for RESP3, ensure you have at least version 5.0 of the client, and change your connection object to include *protocol=3* + +``` python
>>> import redis
>>> r = redis.Redis(host='localhost', port=6379, db=0, protocol=3) +``` + +### Connection Pools + +By default, redis-py uses a connection pool to manage connections. Each instance of a Redis class receives its own connection pool. You can however define your own [redis.ConnectionPool](https://redis.readthedocs.io/en/stable/connections.html#connection-pools). + +``` python
>>> pool = redis.ConnectionPool(host='localhost', port=6379, db=0)
>>> r = redis.Redis(connection_pool=pool) +``` + +Alternatively, you might want to look at [Async connections](https://redis.readthedocs.io/en/stable/examples/asyncio_examples.html), or [Cluster connections](https://redis.readthedocs.io/en/stable/connections.html#cluster-client), or even [Async Cluster connections](https://redis.readthedocs.io/en/stable/connections.html#async-cluster-client).
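For the async API mentioned above, a minimal sketch looks like this (connection parameters are illustrative; see the linked examples for details):

``` python
import asyncio
import redis.asyncio as redis

async def main():
    r = redis.Redis(host='localhost', port=6379, decode_responses=True)
    await r.set('foo', 'bar')
    print(await r.get('foo'))  # -> 'bar'
    await r.aclose()           # close connections (recent redis-py versions)

asyncio.run(main())
```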
+ +### Redis Commands + +There is built-in support for all of the [out-of-the-box Redis commands](https://redis.io/commands). They are exposed using the raw Redis command names (`HSET`, `HGETALL`, etc.) except where a word (e.g. `del`) is reserved by the language. The complete set of commands can be found [here](https://github.com/redis/redis-py/tree/master/redis/commands), or [the documentation](https://redis.readthedocs.io/en/stable/commands.html). + +## Advanced Topics + +The [official Redis command documentation](https://redis.io/commands) +does a great job of explaining each command in detail. redis-py attempts +to adhere to the official command syntax. There are a few exceptions: + +- **MULTI/EXEC**: These are implemented as part of the Pipeline class. + The pipeline is wrapped with the MULTI and EXEC statements by + default when it is executed, which can be disabled by specifying + transaction=False. See more about Pipelines below. + +- **SUBSCRIBE/LISTEN**: Similar to pipelines, PubSub is implemented as + a separate class as it places the underlying connection in a state + where it can't execute non-pubsub commands. Calling the pubsub + method from the Redis client will return a PubSub instance where you + can subscribe to channels and listen for messages. You can only call + PUBLISH from the Redis client (see [this comment on issue + #151](https://github.com/redis/redis-py/issues/151#issuecomment-1545015) + for details). + +For more details, please see the documentation on the [advanced topics page](https://redis.readthedocs.io/en/stable/advanced_features.html). + +### Pipelines + +The following is a basic example of a [Redis pipeline](https://redis.io/docs/manual/pipelining/), a method to optimize round-trip calls by batching Redis commands and receiving their results as a list. + + +``` python
>>> pipe = r.pipeline()
>>> pipe.set('foo', 5)
>>> pipe.set('bar', 18.5)
>>> pipe.set('blee', "hello world!")
>>> pipe.execute()
[True, True, True] +``` + +### PubSub + +The following example shows how to utilize [Redis Pub/Sub](https://redis.io/docs/manual/pubsub/) to subscribe to specific channels. + +``` python
>>> r = redis.Redis(...)
>>> p = r.pubsub()
>>> p.subscribe('my-first-channel', 'my-second-channel', ...)
>>> p.get_message()
{'pattern': None, 'type': 'subscribe', 'channel': b'my-second-channel', 'data': 1} +``` + +### Redis’ search and query capabilities default dialect + +Release 6.0.0 introduces a client-side default dialect for Redis’ search and query capabilities. +By default, the client now overrides the server-side dialect with version 2, automatically appending *DIALECT 2* to commands like *FT.AGGREGATE* and *FT.SEARCH*. + +**Important**: Be aware that the query dialect may impact the results returned. If needed, you can revert to a different dialect version by configuring the client accordingly.
+ +``` python +>>> from redis.commands.search.field import TextField +>>> from redis.commands.search.query import Query +>>> from redis.commands.search.index_definition import IndexDefinition +>>> import redis + +>>> r = redis.Redis(host='localhost', port=6379, db=0) +>>> r.ft().create_index( +>>> (TextField("name"), TextField("lastname")), +>>> definition=IndexDefinition(prefix=["test:"]), +>>> ) + +>>> r.hset("test:1", "name", "James") +>>> r.hset("test:1", "lastname", "Brown") + +>>> # Query with default DIALECT 2 +>>> query = "@name: James Brown" +>>> q = Query(query) +>>> res = r.ft().search(q) + +>>> # Query with explicit DIALECT 1 +>>> query = "@name: James Brown" +>>> q = Query(query).dialect(1) +>>> res = r.ft().search(q) +``` + +You can find further details in the [query dialect documentation](https://redis.io/docs/latest/develop/interact/search-and-query/advanced-concepts/dialects/). + +### Multi-database client (Active-Active) + +The multi-database client allows your application to connect to multiple Redis databases, which are typically replicas of each other. It is designed to work with Redis Software and Redis Cloud Active-Active setups. The client continuously monitors database health, detects failures, and automatically fails over to the next healthy database using a configurable strategy. When the original database becomes healthy again, the client can automatically switch back to it.
+The multi-database client is useful when:

1. You have more than one Redis deployment. This might include two independent Redis servers or two or more Redis databases replicated across multiple [active-active Redis Enterprise](https://redis.io/docs/latest/operate/rs/databases/active-active/) clusters.
2. You want your application to connect to one deployment at a time and to fail over to the next available deployment if the first deployment becomes unavailable.

For the complete failover configuration options and examples, see the [Multi-database client docs](https://redis.readthedocs.io/en/latest/multi_database.html).

---------------------------------------------

### Author

redis-py is developed and maintained by [Redis Inc](https://redis.io). It can be found [here](https://github.com/redis/redis-py), or downloaded from [PyPI](https://pypi.org/project/redis/).

Special thanks to:

- Andy McCurdy (sedrik@gmail.com), the original author of redis-py.
- Ludovico Magnocavallo, author of the original Python Redis client,
  from which some of the socket code is still used.
- Alexander Solovyov for ideas on the generic response callback
  system.
- Paul Hubbard for initial packaging support.

[![Redis](./docs/_static/logo-redis.svg)](https://redis.io)
diff --git a/README.rst b/README.rst
deleted file mode 100644
index 3f8de91466..0000000000
--- a/README.rst
+++ /dev/null
@@ -1,894 +0,0 @@
-redis-py
-========
-
-The Python interface to the Redis key-value store.
-
-.. image:: https://secure.travis-ci.org/andymccurdy/redis-py.svg?branch=master
-   :target: https://travis-ci.org/andymccurdy/redis-py
-.. image:: https://readthedocs.org/projects/redis-py/badge/?version=stable&style=flat
-   :target: https://redis-py.readthedocs.io/en/stable/
-.. image:: https://badge.fury.io/py/redis.svg
-   :target: https://pypi.org/project/redis/
-.. image:: https://codecov.io/gh/andymccurdy/redis-py/branch/master/graph/badge.svg
-   :target: https://codecov.io/gh/andymccurdy/redis-py
-
-
-Python 2 Compatibility Note
----------------------------
-
-redis-py 3.5.x will be the last version of redis-py that supports Python 2.
-The 3.5.x line will continue to get bug fixes and security patches that
-support Python 2 until August 1, 2020. redis-py 4.0 will be the next major
-version and will require Python 3.5+.
-
-
-Installation
-------------
-
-redis-py requires a running Redis server. See `Redis's quickstart
-`_ for installation instructions.
-
-redis-py can be installed using `pip`, similar to other Python packages. Do not use `sudo`
-with `pip`. It is usually good to work in a
-`virtualenv `_ or
-`venv `_ to avoid conflicts with other package
-managers and Python projects. For a quick introduction see
-`Python Virtual Environments in Five Minutes `_.
-
-To install redis-py, simply:
-
-.. code-block:: bash
-
-    $ pip install redis
-
-or from source:
-
-.. code-block:: bash
-
-    $ python setup.py install
-
-
-Getting Started
----------------
-
-.. code-block:: pycon
-
-    >>> import redis
-    >>> r = redis.Redis(host='localhost', port=6379, db=0)
-    >>> r.set('foo', 'bar')
-    True
-    >>> r.get('foo')
-    b'bar'
-
-By default, all responses are returned as `bytes` in Python 3 and `str` in
-Python 2. The user is responsible for decoding to Python 3 strings or Python 2
-unicode objects.
-
-If **all** string responses from a client should be decoded, the user can
-specify `decode_responses=True` to `Redis.__init__`. In this case, any
-Redis command that returns a string type will be decoded with the `encoding`
-specified.
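-
-For example, a minimal sketch:
-
-.. code-block:: pycon
-
-    >>> r = redis.Redis(host='localhost', port=6379, db=0, decode_responses=True)
-    >>> r.set('foo', 'bar')
-    True
-    >>> r.get('foo')  # decoded to str instead of bytes
-    'bar'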
- - -Upgrading from redis-py 2.X to 3.0 ----------------------------------- - -redis-py 3.0 introduces many new features but required a number of backwards -incompatible changes to be made in the process. This section attempts to -provide an upgrade path for users migrating from 2.X to 3.0. - - -Python Version Support -^^^^^^^^^^^^^^^^^^^^^^ - -redis-py 3.0 supports Python 2.7 and Python 3.5+. - - -Client Classes: Redis and StrictRedis -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -redis-py 3.0 drops support for the legacy "Redis" client class. "StrictRedis" -has been renamed to "Redis" and an alias named "StrictRedis" is provided so -that users previously using "StrictRedis" can continue to run unchanged. - -The 2.X "Redis" class provided alternative implementations of a few commands. -This confused users (rightfully so) and caused a number of support issues. To -make things easier going forward, it was decided to drop support for these -alternate implementations and instead focus on a single client class. - -2.X users that are already using StrictRedis don't have to change the class -name. StrictRedis will continue to work for the foreseeable future. - -2.X users that are using the Redis class will have to make changes if they -use any of the following commands: - -* SETEX: The argument order has changed. The new order is (name, time, value). -* LREM: The argument order has changed. The new order is (name, num, value). -* TTL and PTTL: The return value is now always an int and matches the - official Redis command (>0 indicates the timeout, -1 indicates that the key - exists but that it has no expire time set, -2 indicates that the key does - not exist) - - -SSL Connections -^^^^^^^^^^^^^^^ - -redis-py 3.0 changes the default value of the `ssl_cert_reqs` option from -`None` to `'required'`. See -`Issue 1016 `_. This -change enforces hostname validation when accepting a cert from a remote SSL -terminator. If the terminator doesn't properly set the hostname on the cert -this will cause redis-py 3.0 to raise a ConnectionError. - -This check can be disabled by setting `ssl_cert_reqs` to `None`. Note that -doing so removes the security check. Do so at your own risk. - -It has been reported that SSL certs received from AWS ElastiCache do not have -proper hostnames and turning off hostname verification is currently required. - - -MSET, MSETNX and ZADD -^^^^^^^^^^^^^^^^^^^^^ - -These commands all accept a mapping of key/value pairs. In redis-py 2.X -this mapping could be specified as ``*args`` or as ``**kwargs``. Both of these -styles caused issues when Redis introduced optional flags to ZADD. Relying on -``*args`` caused issues with the optional argument order, especially in Python -2.7. Relying on ``**kwargs`` caused potential collision issues of user keys with -the argument names in the method signature. - -To resolve this, redis-py 3.0 has changed these three commands to all accept -a single positional argument named mapping that is expected to be a dict. For -MSET and MSETNX, the dict is a mapping of key-names -> values. For ZADD, the -dict is a mapping of element-names -> score. - -MSET, MSETNX and ZADD now look like: - -.. code-block:: python - - def mset(self, mapping): - def msetnx(self, mapping): - def zadd(self, name, mapping, nx=False, xx=False, ch=False, incr=False): - -All 2.X users that use these commands must modify their code to supply -keys and values as a dict to these commands. 
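-
-For example, a brief sketch of the 3.0-style calls:
-
-.. code-block:: pycon
-
-    >>> r.mset({'key1': 'value1', 'key2': 'value2'})
-    True
-    >>> r.zadd('my-zset', {'member1': 1.0, 'member2': 2.5})
-    2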
- - -ZINCRBY -^^^^^^^ - -redis-py 2.X accidentally modified the argument order of ZINCRBY, swapping the -order of value and amount. ZINCRBY now looks like: - -.. code-block:: python - - def zincrby(self, name, amount, value): - -All 2.X users that rely on ZINCRBY must swap the order of amount and value -for the command to continue to work as intended. - - -Encoding of User Input -^^^^^^^^^^^^^^^^^^^^^^ - -redis-py 3.0 only accepts user data as bytes, strings or numbers (ints, longs -and floats). Attempting to specify a key or a value as any other type will -raise a DataError exception. - -redis-py 2.X attempted to coerce any type of input into a string. While -occasionally convenient, this caused all sorts of hidden errors when users -passed boolean values (which were coerced to 'True' or 'False'), a None -value (which was coerced to 'None') or other values, such as user defined -types. - -All 2.X users should make sure that the keys and values they pass into -redis-py are either bytes, strings or numbers. - - -Locks -^^^^^ - -redis-py 3.0 drops support for the pipeline-based Lock and now only supports -the Lua-based lock. In doing so, LuaLock has been renamed to Lock. This also -means that redis-py Lock objects require Redis server 2.6 or greater. - -2.X users that were explicitly referring to "LuaLock" will have to now refer -to "Lock" instead. - - -Locks as Context Managers -^^^^^^^^^^^^^^^^^^^^^^^^^ - -redis-py 3.0 now raises a LockError when using a lock as a context manager and -the lock cannot be acquired within the specified timeout. This is more of a -bug fix than a backwards incompatible change. However, given an error is now -raised where none was before, this might alarm some users. - -2.X users should make sure they're wrapping their lock code in a try/catch -like this: - -.. code-block:: python - - try: - with r.lock('my-lock-key', blocking_timeout=5) as lock: - # code you want executed only after the lock has been acquired - except LockError: - # the lock wasn't acquired - - -API Reference -------------- - -The `official Redis command documentation `_ does a -great job of explaining each command in detail. redis-py attempts to adhere -to the official command syntax. There are a few exceptions: - -* **SELECT**: Not implemented. See the explanation in the Thread Safety section - below. -* **DEL**: 'del' is a reserved keyword in the Python syntax. Therefore redis-py - uses 'delete' instead. -* **MULTI/EXEC**: These are implemented as part of the Pipeline class. The - pipeline is wrapped with the MULTI and EXEC statements by default when it - is executed, which can be disabled by specifying transaction=False. - See more about Pipelines below. -* **SUBSCRIBE/LISTEN**: Similar to pipelines, PubSub is implemented as a separate - class as it places the underlying connection in a state where it can't - execute non-pubsub commands. Calling the pubsub method from the Redis client - will return a PubSub instance where you can subscribe to channels and listen - for messages. You can only call PUBLISH from the Redis client (see - `this comment on issue #151 - `_ - for details). -* **SCAN/SSCAN/HSCAN/ZSCAN**: The \*SCAN commands are implemented as they - exist in the Redis documentation. In addition, each command has an equivalent - iterator method. These are purely for convenience so the user doesn't have - to keep track of the cursor while iterating. Use the - scan_iter/sscan_iter/hscan_iter/zscan_iter methods for this behavior. 
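-
-For example, a short sketch of the iterator form:
-
-.. code-block:: pycon
-
-    >>> for key in r.scan_iter(match='user:*'):
-    ...     print(key)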
-
-
-More Detail
------------
-
-Connection Pools
-^^^^^^^^^^^^^^^^
-
-Behind the scenes, redis-py uses a connection pool to manage connections to
-a Redis server. By default, each Redis instance you create will in turn create
-its own connection pool. You can override this behavior and use an existing
-connection pool by passing an already created connection pool instance to the
-connection_pool argument of the Redis class. You may choose to do this in order
-to implement client-side sharding or to have fine-grained control of how
-connections are managed.
-
-.. code-block:: pycon
-
-    >>> pool = redis.ConnectionPool(host='localhost', port=6379, db=0)
-    >>> r = redis.Redis(connection_pool=pool)
-
-Connections
-^^^^^^^^^^^
-
-ConnectionPools manage a set of Connection instances. redis-py ships with two
-types of Connections. The default, Connection, is a normal TCP socket based
-connection. The UnixDomainSocketConnection allows for clients running on the
-same device as the server to connect via a unix domain socket. To use a
-UnixDomainSocketConnection, simply pass the unix_socket_path argument, which
-is the path to the unix domain socket file. Additionally, make
-sure the unixsocket parameter is defined in your redis.conf file. It's
-commented out by default.
-
-.. code-block:: pycon
-
-    >>> r = redis.Redis(unix_socket_path='/tmp/redis.sock')
-
-You can create your own Connection subclasses as well. This may be useful if
-you want to control the socket behavior within an async framework. To
-instantiate a client class using your own connection, you need to create
-a connection pool, passing your class to the connection_class argument.
-Other keyword parameters you pass to the pool will be passed to the class
-specified during initialization.
-
-.. code-block:: pycon
-
-    >>> pool = redis.ConnectionPool(connection_class=YourConnectionClass,
-                                    your_arg='...', ...)
-
-Connections maintain an open socket to the Redis server. Sometimes these
-sockets are interrupted or disconnected for a variety of reasons. For example,
-network appliances, load balancers and other services that sit between clients
-and servers are often configured to kill connections that remain idle for a
-given threshold.
-
-When a connection becomes disconnected, the next command issued on that
-connection will fail and redis-py will raise a ConnectionError to the caller.
-This allows each application that uses redis-py to handle errors in a way
-that's fitting for that specific application. However, constant error
-handling can be verbose and cumbersome, especially when socket disconnections
-happen frequently in many production environments.
-
-To combat this, redis-py can issue regular health checks to assess the
-liveliness of a connection just before issuing a command. Users can pass
-``health_check_interval=N`` to the Redis or ConnectionPool classes or
-as a query argument within a Redis URL. The value of ``health_check_interval``
-must be an integer. A value of ``0``, the default, disables health checks.
-Any positive integer will enable health checks. Health checks are performed
-just before a command is executed if the underlying connection has been idle
-for more than ``health_check_interval`` seconds. For example,
-``health_check_interval=30`` will ensure that a health check is run on any
-connection that has been idle for 30 or more seconds just before a command
-is executed on that connection.
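-
-For example, a minimal sketch enabling checks on connections idle for 30
-seconds or more:
-
-.. code-block:: pycon
-
-    >>> r = redis.Redis(host='localhost', port=6379, health_check_interval=30)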
-
-If your application is running in an environment that disconnects idle
-connections after 30 seconds, you should set the ``health_check_interval``
-option to a value less than 30.
-
-This option also works on any PubSub connection that is created from a
-client with ``health_check_interval`` enabled. PubSub users need to ensure
-that ``get_message()`` or ``listen()`` are called more frequently than
-``health_check_interval`` seconds. It is assumed that most workloads already
-do this.
-
-If your PubSub use case doesn't call ``get_message()`` or ``listen()``
-frequently, you should call ``pubsub.check_health()`` explicitly on a
-regular basis.
-
-Parsers
-^^^^^^^
-
-Parser classes provide a way to control how responses from the Redis server
-are parsed. redis-py ships with two parser classes, the PythonParser and the
-HiredisParser. By default, redis-py will attempt to use the HiredisParser if
-you have the hiredis module installed and will fall back to the PythonParser
-otherwise.
-
-Hiredis is a C library maintained by the core Redis team. Pieter Noordhuis was
-kind enough to create Python bindings. Using Hiredis can provide up to a
-10x speed improvement in parsing responses from the Redis server. The
-performance increase is most noticeable when retrieving many pieces of data,
-such as from LRANGE or SMEMBERS operations.
-
-Hiredis is available on PyPI, and can be installed via pip just like redis-py.
-
-.. code-block:: bash
-
-    $ pip install hiredis
-
-Response Callbacks
-^^^^^^^^^^^^^^^^^^
-
-The client class uses a set of callbacks to cast Redis responses to the
-appropriate Python type. There are a number of these callbacks defined on
-the Redis client class in a dictionary called RESPONSE_CALLBACKS.
-
-Custom callbacks can be added on a per-instance basis using the
-set_response_callback method. This method accepts two arguments: a command
-name and the callback. Callbacks added in this manner are only valid on the
-instance the callback is added to. If you want to define or override a callback
-globally, you should make a subclass of the Redis client and add your callback
-to its RESPONSE_CALLBACKS class dictionary.
-
-Response callbacks take at least one parameter: the response from the Redis
-server. Keyword arguments may also be accepted in order to further control
-how to interpret the response. These keyword arguments are specified during the
-command's call to execute_command. The ZRANGE implementation demonstrates the
-use of response callback keyword arguments with its "withscores" argument.
-
-Thread Safety
-^^^^^^^^^^^^^
-
-Redis client instances can safely be shared between threads. Internally,
-connection instances are only retrieved from the connection pool during
-command execution, and returned to the pool directly after. Command execution
-never modifies state on the client instance.
-
-However, there is one caveat: the Redis SELECT command. The SELECT command
-allows you to switch the database currently in use by the connection. That
-database remains selected until another is selected or until the connection is
-closed. This creates an issue in that connections could be returned to the pool
-that are connected to a different database.
-
-As a result, redis-py does not implement the SELECT command on client
-instances. If you use multiple Redis databases within the same application, you
-should create a separate client instance (and possibly a separate connection
-pool) for each database.
-
-It is not safe to pass PubSub or Pipeline objects between threads.
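-
-For the multiple-database case above, a minimal sketch is to create one
-client per database (the db numbers are arbitrary):
-
-.. code-block:: pycon
-
-    >>> cache = redis.Redis(host='localhost', port=6379, db=0)
-    >>> sessions = redis.Redis(host='localhost', port=6379, db=1)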
-
-Pipelines
-^^^^^^^^^
-
-Pipelines are a subclass of the base Redis class that provide support for
-buffering multiple commands to the server in a single request. They can be used
-to dramatically increase the performance of groups of commands by reducing the
-number of back-and-forth TCP packets between the client and server.
-
-Pipelines are quite simple to use:
-
-.. code-block:: pycon
-
-    >>> r = redis.Redis(...)
-    >>> r.set('bing', 'baz')
-    >>> # Use the pipeline() method to create a pipeline instance
-    >>> pipe = r.pipeline()
-    >>> # The following SET commands are buffered
-    >>> pipe.set('foo', 'bar')
-    >>> pipe.get('bing')
-    >>> # the EXECUTE call sends all buffered commands to the server, returning
-    >>> # a list of responses, one for each command.
-    >>> pipe.execute()
-    [True, b'baz']
-
-For ease of use, all commands being buffered into the pipeline return the
-pipeline object itself. Therefore calls can be chained like:
-
-.. code-block:: pycon
-
-    >>> pipe.set('foo', 'bar').sadd('faz', 'baz').incr('auto_number').execute()
-    [True, True, 6]
-
-In addition, pipelines can also ensure the buffered commands are executed
-atomically as a group. This happens by default. If you want to disable the
-atomic nature of a pipeline but still want to buffer commands, you can turn
-off transactions.
-
-.. code-block:: pycon
-
-    >>> pipe = r.pipeline(transaction=False)
-
-A common issue occurs when requiring atomic transactions but needing to
-retrieve values from Redis beforehand for use within the transaction. For
-instance, let's assume that the INCR command didn't exist and we need to build
-an atomic version of INCR in Python.
-
-The completely naive implementation could GET the value, increment it in
-Python, and SET the new value back. However, this is not atomic because
-multiple clients could be doing this at the same time, each getting the same
-value from GET.
-
-Enter the WATCH command. WATCH provides the ability to monitor one or more keys
-prior to starting a transaction. If any of those keys change prior to the
-execution of that transaction, the entire transaction will be canceled and a
-WatchError will be raised. To implement our own client-side INCR command, we
-could do something like this:
-
-.. code-block:: pycon
-
-    >>> with r.pipeline() as pipe:
-    ...     while True:
-    ...         try:
-    ...             # put a WATCH on the key that holds our sequence value
-    ...             pipe.watch('OUR-SEQUENCE-KEY')
-    ...             # after WATCHing, the pipeline is put into immediate execution
-    ...             # mode until we tell it to start buffering commands again.
-    ...             # this allows us to get the current value of our sequence
-    ...             current_value = pipe.get('OUR-SEQUENCE-KEY')
-    ...             next_value = int(current_value) + 1
-    ...             # now we can put the pipeline back into buffered mode with MULTI
-    ...             pipe.multi()
-    ...             pipe.set('OUR-SEQUENCE-KEY', next_value)
-    ...             # and finally, execute the pipeline (the set command)
-    ...             pipe.execute()
-    ...             # if a WatchError wasn't raised during execution, everything
-    ...             # we just did happened atomically.
-    ...             break
-    ...         except WatchError:
-    ...             # another client must have changed 'OUR-SEQUENCE-KEY' between
-    ...             # the time we started WATCHing it and the pipeline's execution.
-    ...             # our best bet is to just retry.
-    ...             continue
-
-Note that, because the Pipeline must bind to a single connection for the
-duration of a WATCH, care must be taken to ensure that the connection is
-returned to the connection pool by calling the reset() method.
-If the Pipeline is used as a context manager (as in the example above), reset()
-will be called automatically. Of course you can do this the manual way by
-explicitly calling reset():
-
-.. code-block:: pycon
-
-    >>> pipe = r.pipeline()
-    >>> while True:
-    ...     try:
-    ...         pipe.watch('OUR-SEQUENCE-KEY')
-    ...         ...
-    ...         pipe.execute()
-    ...         break
-    ...     except WatchError:
-    ...         continue
-    ...     finally:
-    ...         pipe.reset()
-
-A convenience method named "transaction" exists that handles all the
-boilerplate of watching keys and retrying on watch errors. It takes a callable
-that should expect a single parameter, a pipeline object, and any number of
-keys to be WATCHed. Our client-side INCR command above can be written like
-this, which is much easier to read:
-
-.. code-block:: pycon
-
-    >>> def client_side_incr(pipe):
-    ...     current_value = pipe.get('OUR-SEQUENCE-KEY')
-    ...     next_value = int(current_value) + 1
-    ...     pipe.multi()
-    ...     pipe.set('OUR-SEQUENCE-KEY', next_value)
-    >>>
-    >>> r.transaction(client_side_incr, 'OUR-SEQUENCE-KEY')
-    [True]
-
-Be sure to call `pipe.multi()` in the callable passed to `Redis.transaction`
-prior to any write commands.
-
-Publish / Subscribe
-^^^^^^^^^^^^^^^^^^^
-
-redis-py includes a `PubSub` object that subscribes to channels and listens
-for new messages. Creating a `PubSub` object is easy.
-
-.. code-block:: pycon
-
-    >>> r = redis.Redis(...)
-    >>> p = r.pubsub()
-
-Once a `PubSub` instance is created, channels and patterns can be subscribed
-to.
-
-.. code-block:: pycon
-
-    >>> p.subscribe('my-first-channel', 'my-second-channel', ...)
-    >>> p.psubscribe('my-*', ...)
-
-The `PubSub` instance is now subscribed to those channels/patterns. The
-subscription confirmations can be seen by reading messages from the `PubSub`
-instance.
-
-.. code-block:: pycon
-
-    >>> p.get_message()
-    {'pattern': None, 'type': 'subscribe', 'channel': b'my-second-channel', 'data': 1}
-    >>> p.get_message()
-    {'pattern': None, 'type': 'subscribe', 'channel': b'my-first-channel', 'data': 2}
-    >>> p.get_message()
-    {'pattern': None, 'type': 'psubscribe', 'channel': b'my-*', 'data': 3}
-
-Every message read from a `PubSub` instance will be a dictionary with the
-following keys.
-
-* **type**: One of the following: 'subscribe', 'unsubscribe', 'psubscribe',
-  'punsubscribe', 'message', 'pmessage'
-* **channel**: The channel [un]subscribed to or the channel a message was
-  published to
-* **pattern**: The pattern that matched a published message's channel. Will be
-  `None` in all cases except for 'pmessage' types.
-* **data**: The message data. With [un]subscribe messages, this value will be
-  the number of channels and patterns the connection is currently subscribed
-  to. With [p]message messages, this value will be the actual published
-  message.
-
-Let's send a message now.
-
-.. code-block:: pycon
-
-    # the publish method returns the number of matching channel and pattern
-    # subscriptions. 'my-first-channel' matches both the 'my-first-channel'
-    # subscription and the 'my-*' pattern subscription, so this message will
-    # be delivered to 2 channels/patterns
-    >>> r.publish('my-first-channel', 'some data')
-    2
-    >>> p.get_message()
-    {'channel': b'my-first-channel', 'data': b'some data', 'pattern': None, 'type': 'message'}
-    >>> p.get_message()
-    {'channel': b'my-first-channel', 'data': b'some data', 'pattern': b'my-*', 'type': 'pmessage'}
-
-Unsubscribing works just like subscribing. If no arguments are passed to
-[p]unsubscribe, all channels or patterns will be unsubscribed from.
-
-.. code-block:: pycon
-
-    >>> p.unsubscribe()
-    >>> p.punsubscribe('my-*')
-    >>> p.get_message()
-    {'channel': b'my-second-channel', 'data': 2, 'pattern': None, 'type': 'unsubscribe'}
-    >>> p.get_message()
-    {'channel': b'my-first-channel', 'data': 1, 'pattern': None, 'type': 'unsubscribe'}
-    >>> p.get_message()
-    {'channel': b'my-*', 'data': 0, 'pattern': None, 'type': 'punsubscribe'}
-
-redis-py also allows you to register callback functions to handle published
-messages. Message handlers take a single argument, the message, which is a
-dictionary just like the examples above. To subscribe to a channel or pattern
-with a message handler, pass the channel or pattern name as a keyword argument
-with its value being the callback function.
-
-When a message is read on a channel or pattern with a message handler, the
-message dictionary is created and passed to the message handler. In this case,
-a `None` value is returned from get_message() since the message was already
-handled.
-
-.. code-block:: pycon
-
-    >>> def my_handler(message):
-    ...     print('MY HANDLER: ', message['data'])
-    >>> p.subscribe(**{'my-channel': my_handler})
-    # read the subscribe confirmation message
-    >>> p.get_message()
-    {'pattern': None, 'type': 'subscribe', 'channel': b'my-channel', 'data': 1}
-    >>> r.publish('my-channel', 'awesome data')
-    1
-    # for the message handler to work, we need to tell the instance to read data.
-    # this can be done in several ways (read more below). we'll just use
-    # the familiar get_message() function for now
-    >>> message = p.get_message()
-    MY HANDLER:  awesome data
-    # note here that the my_handler callback printed the string above.
-    # `message` is None because the message was handled by our handler.
-    >>> print(message)
-    None
-
-If your application is not interested in the (sometimes noisy)
-subscribe/unsubscribe confirmation messages, you can ignore them by passing
-`ignore_subscribe_messages=True` to `r.pubsub()`. This will cause all
-subscribe/unsubscribe messages to be read, but they won't bubble up to your
-application.
-
-.. code-block:: pycon
-
-    >>> p = r.pubsub(ignore_subscribe_messages=True)
-    >>> p.subscribe('my-channel')
-    >>> p.get_message()  # hides the subscribe message and returns None
-    >>> r.publish('my-channel', 'my data')
-    1
-    >>> p.get_message()
-    {'channel': b'my-channel', 'data': b'my data', 'pattern': None, 'type': 'message'}
-
-There are three different strategies for reading messages.
-
-The examples above have been using `pubsub.get_message()`. Behind the scenes,
-`get_message()` uses the system's 'select' module to quickly poll the
-connection's socket. If there's data available to be read, `get_message()` will
-read it, format the message and return it or pass it to a message handler. If
-there's no data to be read, `get_message()` will immediately return None. This
-makes it trivial to integrate into an existing event loop inside your
-application.
-
-.. code-block:: pycon
-
-    >>> while True:
-    >>>     message = p.get_message()
-    >>>     if message:
-    >>>         # do something with the message
-    >>>     time.sleep(0.001)  # be nice to the system :)
-
-Older versions of redis-py only read messages with `pubsub.listen()`. listen()
-is a generator that blocks until a message is available. If your application
-doesn't need to do anything else but receive and act on messages received from
-redis, listen() is an easy way to get up and running.
-
-.. code-block:: pycon
-
-    >>> for message in p.listen():
-    ...     # do something with the message
-
-The third option runs an event loop in a separate thread.
-`pubsub.run_in_thread()` creates a new thread and starts the event loop. The
-thread object is returned to the caller of `run_in_thread()`. The caller can
-use the `thread.stop()` method to shut down the event loop and thread. Behind
-the scenes, this is simply a wrapper around `get_message()` that runs in a
-separate thread, essentially creating a tiny non-blocking event loop for you.
-`run_in_thread()` takes an optional `sleep_time` argument. If specified, the
-event loop will call `time.sleep()` with the value in each iteration of the
-loop.
-
-Note: Since we're running in a separate thread, there's no way to handle
-messages that aren't automatically handled with registered message handlers.
-Therefore, redis-py prevents you from calling `run_in_thread()` if you're
-subscribed to patterns or channels that don't have message handlers attached.
-
-.. code-block:: pycon
-
-    >>> p.subscribe(**{'my-channel': my_handler})
-    >>> thread = p.run_in_thread(sleep_time=0.001)
-    # the event loop is now running in the background processing messages
-    # when it's time to shut it down...
-    >>> thread.stop()
-
-A PubSub object adheres to the same encoding semantics as the client instance
-it was created from. Any channel or pattern that's unicode will be encoded
-using the `charset` specified on the client before being sent to Redis. If the
-client's `decode_responses` flag is set to False (the default), the
-'channel', 'pattern' and 'data' values in message dictionaries will be byte
-strings (str on Python 2, bytes on Python 3). If the client's
-`decode_responses` is True, then the 'channel', 'pattern' and 'data' values
-will be automatically decoded to unicode strings using the client's `charset`.
-
-PubSub objects remember what channels and patterns they are subscribed to. In
-the event of a disconnection such as a network error or timeout, the
-PubSub object will re-subscribe to all prior channels and patterns when
-reconnecting. Messages that were published while the client was disconnected
-cannot be delivered. When you're finished with a PubSub object, call its
-`.close()` method to shut down the connection.
-
-.. code-block:: pycon
-
-    >>> p = r.pubsub()
-    >>> ...
-    >>> p.close()
-
-
-The PUBSUB subcommands CHANNELS, NUMSUB and NUMPAT are also
-supported:
-
-.. code-block:: pycon
-
-    >>> r.pubsub_channels()
-    [b'foo', b'bar']
-    >>> r.pubsub_numsub('foo', 'bar')
-    [(b'foo', 9001), (b'bar', 42)]
-    >>> r.pubsub_numsub('baz')
-    [(b'baz', 0)]
-    >>> r.pubsub_numpat()
-    1204
-
-Monitor
-^^^^^^^
-redis-py includes a `Monitor` object that streams every command processed
-by the Redis server. Use `listen()` on the `Monitor` object to block
-until a command is received.
-
-.. code-block:: pycon
-
-    >>> r = redis.Redis(...)
-    >>> with r.monitor() as m:
-    >>>     for command in m.listen():
-    >>>         print(command)
-
-Lua Scripting
-^^^^^^^^^^^^^
-
-redis-py supports the EVAL, EVALSHA, and SCRIPT commands. However, there are
-a number of edge cases that make these commands tedious to use in real world
-scenarios. Therefore, redis-py exposes a Script object that makes scripting
-much easier to use.
-
-To create a Script instance, use the `register_script` function on a client
-instance passing the Lua code as the first argument. `register_script` returns
-a Script instance that you can use throughout your code.
-
-The following trivial Lua script accepts two parameters: the name of a key and
-a multiplier value.
The script fetches the value stored in the key, multiplies -it with the multiplier value and returns the result. - -.. code-block:: pycon - - >>> r = redis.Redis() - >>> lua = """ - ... local value = redis.call('GET', KEYS[1]) - ... value = tonumber(value) - ... return value * ARGV[1]""" - >>> multiply = r.register_script(lua) - -`multiply` is now a Script instance that is invoked by calling it like a -function. Script instances accept the following optional arguments: - -* **keys**: A list of key names that the script will access. This becomes the - KEYS list in Lua. -* **args**: A list of argument values. This becomes the ARGV list in Lua. -* **client**: A redis-py Client or Pipeline instance that will invoke the - script. If client isn't specified, the client that initially - created the Script instance (the one that `register_script` was - invoked from) will be used. - -Continuing the example from above: - -.. code-block:: pycon - - >>> r.set('foo', 2) - >>> multiply(keys=['foo'], args=[5]) - 10 - -The value of key 'foo' is set to 2. When multiply is invoked, the 'foo' key is -passed to the script along with the multiplier value of 5. Lua executes the -script and returns the result, 10. - -Script instances can be executed using a different client instance, even one -that points to a completely different Redis server. - -.. code-block:: pycon - - >>> r2 = redis.Redis('redis2.example.com') - >>> r2.set('foo', 3) - >>> multiply(keys=['foo'], args=[5], client=r2) - 15 - -The Script object ensures that the Lua script is loaded into Redis's script -cache. In the event of a NOSCRIPT error, it will load the script and retry -executing it. - -Script objects can also be used in pipelines. The pipeline instance should be -passed as the client argument when calling the script. Care is taken to ensure -that the script is registered in Redis's script cache just prior to pipeline -execution. - -.. code-block:: pycon - - >>> pipe = r.pipeline() - >>> pipe.set('foo', 5) - >>> multiply(keys=['foo'], args=[5], client=pipe) - >>> pipe.execute() - [True, 25] - -Sentinel support -^^^^^^^^^^^^^^^^ - -redis-py can be used together with `Redis Sentinel `_ -to discover Redis nodes. You need to have at least one Sentinel daemon running -in order to use redis-py's Sentinel support. - -Connecting redis-py to the Sentinel instance(s) is easy. You can use a -Sentinel connection to discover the master and slaves network addresses: - -.. code-block:: pycon - - >>> from redis.sentinel import Sentinel - >>> sentinel = Sentinel([('localhost', 26379)], socket_timeout=0.1) - >>> sentinel.discover_master('mymaster') - ('127.0.0.1', 6379) - >>> sentinel.discover_slaves('mymaster') - [('127.0.0.1', 6380)] - -You can also create Redis client connections from a Sentinel instance. You can -connect to either the master (for write operations) or a slave (for read-only -operations). - -.. code-block:: pycon - - >>> master = sentinel.master_for('mymaster', socket_timeout=0.1) - >>> slave = sentinel.slave_for('mymaster', socket_timeout=0.1) - >>> master.set('foo', 'bar') - >>> slave.get('foo') - b'bar' - -The master and slave objects are normal Redis instances with their -connection pool bound to the Sentinel instance. When a Sentinel backed client -attempts to establish a connection, it first queries the Sentinel servers to -determine an appropriate host to connect to. If no server is found, -a MasterNotFoundError or SlaveNotFoundError is raised. Both exceptions are -subclasses of ConnectionError. 
- -When trying to connect to a slave client, the Sentinel connection pool will -iterate over the list of slaves until it finds one that can be connected to. -If no slaves can be connected to, a connection will be established with the -master. - -See `Guidelines for Redis clients with support for Redis Sentinel -`_ to learn more about Redis Sentinel. - -Scan Iterators -^^^^^^^^^^^^^^ - -The \*SCAN commands introduced in Redis 2.8 can be cumbersome to use. While -these commands are fully supported, redis-py also exposes the following methods -that return Python iterators for convenience: `scan_iter`, `hscan_iter`, -`sscan_iter` and `zscan_iter`. - -.. code-block:: pycon - - >>> for key, value in (('A', '1'), ('B', '2'), ('C', '3')): - ... r.set(key, value) - >>> for key in r.scan_iter(): - ... print(key, r.get(key)) - A 1 - B 2 - C 3 - -Author -^^^^^^ - -redis-py is developed and maintained by Andy McCurdy (sedrik@gmail.com). -It can be found here: https://github.com/andymccurdy/redis-py - -Special thanks to: - -* Ludovico Magnocavallo, author of the original Python Redis client, from - which some of the socket code is still used. -* Alexander Solovyov for ideas on the generic response callback system. -* Paul Hubbard for initial packaging support. diff --git a/RELEASE b/RELEASE deleted file mode 100644 index f45b0bf9f6..0000000000 --- a/RELEASE +++ /dev/null @@ -1,9 +0,0 @@ -Release Process -=============== - -1. Make sure all tests pass. -2. Make sure CHANGES is up to date. -3. Update redis.__init__.__version__ and commit -4. git tag -5. git push --tag -6. rm dist/* && python setup.py sdist bdist_wheel && twine upload dist/* diff --git a/benchmarks/base.py b/benchmarks/base.py index 44e93414ff..f52657f072 100644 --- a/benchmarks/base.py +++ b/benchmarks/base.py @@ -1,12 +1,12 @@ import functools import itertools -import redis import sys import timeit -from redis._compat import izip + +import redis -class Benchmark(object): +class Benchmark: ARGUMENTS = () def __init__(self): @@ -16,9 +16,7 @@ def get_client(self, **kwargs): # eventually make this more robust and take optional args from # argparse if self._client is None or kwargs: - defaults = { - 'db': 9 - } + defaults = {"db": 9} defaults.update(kwargs) pool = redis.ConnectionPool(**kwargs) self._client = redis.Redis(connection_pool=pool) @@ -31,16 +29,16 @@ def run(self, **kwargs): pass def run_benchmark(self): - group_names = [group['name'] for group in self.ARGUMENTS] - group_values = [group['values'] for group in self.ARGUMENTS] + group_names = [group["name"] for group in self.ARGUMENTS] + group_values = [group["values"] for group in self.ARGUMENTS] for value_set in itertools.product(*group_values): - pairs = list(izip(group_names, value_set)) - arg_string = ', '.join(['%s=%s' % (p[0], p[1]) for p in pairs]) - sys.stdout.write('Benchmark: %s... ' % arg_string) + pairs = list(zip(group_names, value_set)) + arg_string = ", ".join(f"{p[0]}={p[1]}" for p in pairs) + sys.stdout.write(f"Benchmark: {arg_string}... 
") sys.stdout.flush() kwargs = dict(pairs) setup = functools.partial(self.setup, **kwargs) run = functools.partial(self.run, **kwargs) t = timeit.timeit(stmt=run, setup=setup, number=1000) - sys.stdout.write('%f\n' % t) + sys.stdout.write(f"{t:f}\n") sys.stdout.flush() diff --git a/benchmarks/basic_operations.py b/benchmarks/basic_operations.py index a4b675d7ba..66cd6b320d 100644 --- a/benchmarks/basic_operations.py +++ b/benchmarks/basic_operations.py @@ -1,29 +1,27 @@ -from __future__ import print_function -import redis import time -import sys -from functools import wraps from argparse import ArgumentParser +from functools import wraps -if sys.version_info[0] == 3: - long = int +import redis def parse_args(): parser = ArgumentParser() - parser.add_argument('-n', - type=int, - help='Total number of requests (default 100000)', - default=100000) - parser.add_argument('-P', - type=int, - help=('Pipeline requests.' - ' Default 1 (no pipeline).'), - default=1) - parser.add_argument('-s', - type=int, - help='Data size of SET/GET value in bytes (default 2)', - default=2) + parser.add_argument( + "-n", type=int, help="Total number of requests (default 100000)", default=100000 + ) + parser.add_argument( + "-P", + type=int, + help=("Pipeline requests. Default 1 (no pipeline)."), + default=1, + ) + parser.add_argument( + "-s", + type=int, + help="Data size of SET/GET value in bytes (default 2)", + default=2, + ) args = parser.parse_args() return args @@ -47,18 +45,19 @@ def run(): def timer(func): @wraps(func) def wrapper(*args, **kwargs): - start = time.clock() + start = time.monotonic() ret = func(*args, **kwargs) - duration = time.clock() - start - if 'num' in kwargs: - count = kwargs['num'] + duration = time.monotonic() - start + if "num" in kwargs: + count = kwargs["num"] else: count = args[1] - print('{} - {} Requests'.format(func.__name__, count)) - print('Duration = {}'.format(duration)) - print('Rate = {}'.format(count/duration)) - print('') + print(f"{func.__name__} - {count} Requests") + print(f"Duration = {duration}") + print(f"Rate = {count / duration}") + print() return ret + return wrapper @@ -67,10 +66,9 @@ def set_str(conn, num, pipeline_size, data_size): if pipeline_size > 1: conn = conn.pipeline() - format_str = '{:0<%d}' % data_size - set_data = format_str.format('a') + set_data = "a".ljust(data_size, "0") for i in range(num): - conn.set('set_str:%d' % i, set_data) + conn.set(f"set_str:{i}", set_data) if pipeline_size > 1 and i % pipeline_size == 0: conn.execute() @@ -83,10 +81,9 @@ def set_int(conn, num, pipeline_size, data_size): if pipeline_size > 1: conn = conn.pipeline() - format_str = '{:0<%d}' % data_size - set_data = int(format_str.format('1')) + set_data = 10 ** (data_size - 1) for i in range(num): - conn.set('set_int:%d' % i, set_data) + conn.set(f"set_int:{i}", set_data) if pipeline_size > 1 and i % pipeline_size == 0: conn.execute() @@ -100,7 +97,7 @@ def get_str(conn, num, pipeline_size, data_size): conn = conn.pipeline() for i in range(num): - conn.get('set_str:%d' % i) + conn.get(f"set_str:{i}") if pipeline_size > 1 and i % pipeline_size == 0: conn.execute() @@ -114,7 +111,7 @@ def get_int(conn, num, pipeline_size, data_size): conn = conn.pipeline() for i in range(num): - conn.get('set_int:%d' % i) + conn.get(f"set_int:{i}") if pipeline_size > 1 and i % pipeline_size == 0: conn.execute() @@ -128,7 +125,7 @@ def incr(conn, num, pipeline_size, *args, **kwargs): conn = conn.pipeline() for i in range(num): - conn.incr('incr_key') + conn.incr("incr_key") if 
pipeline_size > 1 and i % pipeline_size == 0: conn.execute() @@ -141,10 +138,9 @@ def lpush(conn, num, pipeline_size, data_size): if pipeline_size > 1: conn = conn.pipeline() - format_str = '{:0<%d}' % data_size - set_data = int(format_str.format('1')) + set_data = 10 ** (data_size - 1) for i in range(num): - conn.lpush('lpush_key', set_data) + conn.lpush("lpush_key", set_data) if pipeline_size > 1 and i % pipeline_size == 0: conn.execute() @@ -158,7 +154,7 @@ def lrange_300(conn, num, pipeline_size, data_size): conn = conn.pipeline() for i in range(num): - conn.lrange('lpush_key', i, i+300) + conn.lrange("lpush_key", i, i + 300) if pipeline_size > 1 and i % pipeline_size == 0: conn.execute() @@ -171,7 +167,7 @@ def lpop(conn, num, pipeline_size, data_size): if pipeline_size > 1: conn = conn.pipeline() for i in range(num): - conn.lpop('lpush_key') + conn.lpop("lpush_key") if pipeline_size > 1 and i % pipeline_size == 0: conn.execute() if pipeline_size > 1: @@ -183,12 +179,9 @@ def hmset(conn, num, pipeline_size, data_size): if pipeline_size > 1: conn = conn.pipeline() - set_data = {'str_value': 'string', - 'int_value': 123456, - 'long_value': long(123456), - 'float_value': 123456.0} + set_data = {"str_value": "string", "int_value": 123456, "float_value": 123456.0} for i in range(num): - conn.hmset('hmset_key', set_data) + conn.hmset("hmset_key", set_data) if pipeline_size > 1 and i % pipeline_size == 0: conn.execute() @@ -196,5 +189,5 @@ def hmset(conn, num, pipeline_size, data_size): conn.execute() -if __name__ == '__main__': +if __name__ == "__main__": run() diff --git a/benchmarks/cluster_async.py b/benchmarks/cluster_async.py new file mode 100644 index 0000000000..17dd52b5df --- /dev/null +++ b/benchmarks/cluster_async.py @@ -0,0 +1,263 @@ +import asyncio +import functools +import time + +import aioredis_cluster +import aredis +import uvloop + +import redis.asyncio as redispy + + +def timer(func): + @functools.wraps(func) + async def wrapper(*args, **kwargs): + tic = time.perf_counter() + await func(*args, **kwargs) + toc = time.perf_counter() + return f"{toc - tic:.4f}" + + return wrapper + + +@timer +async def set_str(client, gather, data): + if gather: + for _ in range(count // 100): + await asyncio.gather( + *( + asyncio.create_task(client.set(f"bench:str_{i}", data)) + for i in range(100) + ) + ) + else: + for i in range(count): + await client.set(f"bench:str_{i}", data) + + +@timer +async def set_int(client, gather, data): + if gather: + for _ in range(count // 100): + await asyncio.gather( + *( + asyncio.create_task(client.set(f"bench:int_{i}", data)) + for i in range(100) + ) + ) + else: + for i in range(count): + await client.set(f"bench:int_{i}", data) + + +@timer +async def get_str(client, gather): + if gather: + for _ in range(count // 100): + await asyncio.gather( + *(asyncio.create_task(client.get(f"bench:str_{i}")) for i in range(100)) + ) + else: + for i in range(count): + await client.get(f"bench:str_{i}") + + +@timer +async def get_int(client, gather): + if gather: + for _ in range(count // 100): + await asyncio.gather( + *(asyncio.create_task(client.get(f"bench:int_{i}")) for i in range(100)) + ) + else: + for i in range(count): + await client.get(f"bench:int_{i}") + + +@timer +async def hset(client, gather, data): + if gather: + for _ in range(count // 100): + await asyncio.gather( + *( + asyncio.create_task(client.hset("bench:hset", str(i), data)) + for i in range(100) + ) + ) + else: + for i in range(count): + await client.hset("bench:hset", str(i), data) + + 
+@timer +async def hget(client, gather): + if gather: + for _ in range(count // 100): + await asyncio.gather( + *( + asyncio.create_task(client.hget("bench:hset", str(i))) + for i in range(100) + ) + ) + else: + for i in range(count): + await client.hget("bench:hset", str(i)) + + +@timer +async def incr(client, gather): + if gather: + for _ in range(count // 100): + await asyncio.gather( + *(asyncio.create_task(client.incr("bench:incr")) for i in range(100)) + ) + else: + for i in range(count): + await client.incr("bench:incr") + + +@timer +async def lpush(client, gather, data): + if gather: + for _ in range(count // 100): + await asyncio.gather( + *( + asyncio.create_task(client.lpush("bench:lpush", data)) + for i in range(100) + ) + ) + else: + for i in range(count): + await client.lpush("bench:lpush", data) + + +@timer +async def lrange_300(client, gather): + if gather: + for _ in range(count // 100): + await asyncio.gather( + *( + asyncio.create_task(client.lrange("bench:lpush", i, i + 300)) + for i in range(100) + ) + ) + else: + for i in range(count): + await client.lrange("bench:lpush", i, i + 300) + + +@timer +async def lpop(client, gather): + if gather: + for _ in range(count // 100): + await asyncio.gather( + *(asyncio.create_task(client.lpop("bench:lpush")) for i in range(100)) + ) + else: + for i in range(count): + await client.lpop("bench:lpush") + + +@timer +async def warmup(client): + await asyncio.gather( + *(asyncio.create_task(client.exists(f"bench:warmup_{i}")) for i in range(100)) + ) + + +@timer +async def run(client, gather): + data_str = "a" * size + data_int = int("1" * size) + + if gather is False: + for ret in await asyncio.gather( + asyncio.create_task(set_str(client, gather, data_str)), + asyncio.create_task(set_int(client, gather, data_int)), + asyncio.create_task(hset(client, gather, data_str)), + asyncio.create_task(incr(client, gather)), + asyncio.create_task(lpush(client, gather, data_int)), + ): + print(ret) + for ret in await asyncio.gather( + asyncio.create_task(get_str(client, gather)), + asyncio.create_task(get_int(client, gather)), + asyncio.create_task(hget(client, gather)), + asyncio.create_task(lrange_300(client, gather)), + asyncio.create_task(lpop(client, gather)), + ): + print(ret) + else: + print(await set_str(client, gather, data_str)) + print(await set_int(client, gather, data_int)) + print(await hset(client, gather, data_str)) + print(await incr(client, gather)) + print(await lpush(client, gather, data_int)) + + print(await get_str(client, gather)) + print(await get_int(client, gather)) + print(await hget(client, gather)) + print(await lrange_300(client, gather)) + print(await lpop(client, gather)) + + +async def main(loop, gather=None): + arc = aredis.StrictRedisCluster( + host=host, + port=port, + password=password, + max_connections=2**31, + max_connections_per_node=2**31, + readonly=False, + reinitialize_steps=count, + skip_full_coverage_check=True, + decode_responses=False, + max_idle_time=count, + idle_check_interval=count, + ) + print(f"{loop} {gather} {await warmup(arc)} aredis") + print(await run(arc, gather=gather)) + arc.connection_pool.disconnect() + + aiorc = await aioredis_cluster.create_redis_cluster( + [(host, port)], + password=password, + state_reload_interval=count, + idle_connection_timeout=count, + pool_maxsize=2**31, + ) + print(f"{loop} {gather} {await warmup(aiorc)} aioredis-cluster") + print(await run(aiorc, gather=gather)) + aiorc.close() + await aiorc.wait_closed() + + async with redispy.RedisCluster( + host=host, 
+ port=port, + password=password, + reinitialize_steps=count, + read_from_replicas=False, + decode_responses=False, + max_connections=2**31, + ) as rca: + print(f"{loop} {gather} {await warmup(rca)} redispy") + print(await run(rca, gather=gather)) + + +if __name__ == "__main__": + host = "localhost" + port = 16379 + password = None + + count = 10000 + size = 256 + + asyncio.run(main("asyncio")) + asyncio.run(main("asyncio", gather=False)) + asyncio.run(main("asyncio", gather=True)) + + uvloop.install() + + asyncio.run(main("uvloop")) + asyncio.run(main("uvloop", gather=False)) + asyncio.run(main("uvloop", gather=True)) diff --git a/benchmarks/cluster_async_pipeline.py b/benchmarks/cluster_async_pipeline.py new file mode 100644 index 0000000000..af45b44511 --- /dev/null +++ b/benchmarks/cluster_async_pipeline.py @@ -0,0 +1,107 @@ +import asyncio +import functools +import time + +import aioredis_cluster +import aredis +import uvloop + +import redis.asyncio as redispy + + +def timer(func): + @functools.wraps(func) + async def wrapper(*args, **kwargs): + tic = time.perf_counter() + await func(*args, **kwargs) + toc = time.perf_counter() + return f"{toc - tic:.4f}" + + return wrapper + + +@timer +async def warmup(client): + await asyncio.gather( + *(asyncio.create_task(client.exists(f"bench:warmup_{i}")) for i in range(100)) + ) + + +@timer +async def run(client): + data_str = "a" * size + data_int = int("1" * size) + + for i in range(count): + with client.pipeline() as pipe: + await ( + pipe.set(f"bench:str_{i}", data_str) + .set(f"bench:int_{i}", data_int) + .get(f"bench:str_{i}") + .get(f"bench:int_{i}") + .hset("bench:hset", str(i), data_str) + .hget("bench:hset", str(i)) + .incr("bench:incr") + .lpush("bench:lpush", data_int) + .lrange("bench:lpush", 0, 300) + .lpop("bench:lpush") + .execute() + ) + + +async def main(loop): + arc = aredis.StrictRedisCluster( + host=host, + port=port, + password=password, + max_connections=2**31, + max_connections_per_node=2**31, + readonly=False, + reinitialize_steps=count, + skip_full_coverage_check=True, + decode_responses=False, + max_idle_time=count, + idle_check_interval=count, + ) + print(f"{loop} {await warmup(arc)} aredis") + print(await run(arc)) + arc.connection_pool.disconnect() + + aiorc = await aioredis_cluster.create_redis_cluster( + [(host, port)], + password=password, + state_reload_interval=count, + idle_connection_timeout=count, + pool_maxsize=2**31, + ) + print(f"{loop} {await warmup(aiorc)} aioredis-cluster") + print(await run(aiorc)) + aiorc.close() + await aiorc.wait_closed() + + async with redispy.RedisCluster( + host=host, + port=port, + password=password, + reinitialize_steps=count, + read_from_replicas=False, + decode_responses=False, + max_connections=2**31, + ) as rca: + print(f"{loop} {await warmup(rca)} redispy") + print(await run(rca)) + + +if __name__ == "__main__": + host = "localhost" + port = 16379 + password = None + + count = 10000 + size = 256 + + asyncio.run(main("asyncio")) + + uvloop.install() + + asyncio.run(main("uvloop")) diff --git a/benchmarks/command_packer_benchmark.py b/benchmarks/command_packer_benchmark.py index 1216df6775..4fb7196422 100644 --- a/benchmarks/command_packer_benchmark.py +++ b/benchmarks/command_packer_benchmark.py @@ -1,9 +1,7 @@ -import socket -from redis.connection import (Connection, SYM_STAR, SYM_DOLLAR, SYM_EMPTY, - SYM_CRLF) -from redis._compat import imap from base import Benchmark +from redis.connection import SYM_CRLF, SYM_DOLLAR, SYM_EMPTY, SYM_STAR, Connection + class 
StringJoiningConnection(Connection): def send_packed_command(self, command, check_health=True): @@ -12,26 +10,30 @@ def send_packed_command(self, command, check_health=True): self.connect() try: self._sock.sendall(command) - except socket.error as e: + except OSError as e: self.disconnect() if len(e.args) == 1: - _errno, errmsg = 'UNKNOWN', e.args[0] + _errno, errmsg = "UNKNOWN", e.args[0] else: _errno, errmsg = e.args - raise ConnectionError("Error %s while writing to socket. %s." % - (_errno, errmsg)) + raise ConnectionError(f"Error {_errno} while writing to socket. {errmsg}.") except Exception: self.disconnect() raise def pack_command(self, *args): "Pack a series of arguments into a value Redis command" - args_output = SYM_EMPTY.join([ - SYM_EMPTY.join( - (SYM_DOLLAR, str(len(k)).encode(), SYM_CRLF, k, SYM_CRLF)) - for k in imap(self.encoder.encode, args)]) + args_output = SYM_EMPTY.join( + [ + SYM_EMPTY.join( + (SYM_DOLLAR, str(len(k)).encode(), SYM_CRLF, k, SYM_CRLF) + ) + for k in map(self.encoder.encode, args) + ] + ) output = SYM_EMPTY.join( - (SYM_STAR, str(len(args)).encode(), SYM_CRLF, args_output)) + (SYM_STAR, str(len(args)).encode(), SYM_CRLF, args_output) + ) return output @@ -44,48 +46,46 @@ def send_packed_command(self, command, check_health=True): command = [command] for item in command: self._sock.sendall(item) - except socket.error as e: + except OSError as e: self.disconnect() if len(e.args) == 1: - _errno, errmsg = 'UNKNOWN', e.args[0] + _errno, errmsg = "UNKNOWN", e.args[0] else: _errno, errmsg = e.args - raise ConnectionError("Error %s while writing to socket. %s." % - (_errno, errmsg)) + raise ConnectionError(f"Error {_errno} while writing to socket. {errmsg}.") except Exception: self.disconnect() raise def pack_command(self, *args): output = [] - buff = SYM_EMPTY.join( - (SYM_STAR, str(len(args)).encode(), SYM_CRLF)) + buff = SYM_EMPTY.join((SYM_STAR, str(len(args)).encode(), SYM_CRLF)) - for k in imap(self.encoder.encode, args): + for k in map(self.encoder.encode, args): if len(buff) > 6000 or len(k) > 6000: buff = SYM_EMPTY.join( - (buff, SYM_DOLLAR, str(len(k)).encode(), SYM_CRLF)) + (buff, SYM_DOLLAR, str(len(k)).encode(), SYM_CRLF) + ) output.append(buff) output.append(k) buff = SYM_CRLF else: - buff = SYM_EMPTY.join((buff, SYM_DOLLAR, str(len(k)).encode(), - SYM_CRLF, k, SYM_CRLF)) + buff = SYM_EMPTY.join( + (buff, SYM_DOLLAR, str(len(k)).encode(), SYM_CRLF, k, SYM_CRLF) + ) output.append(buff) return output class CommandPackerBenchmark(Benchmark): - ARGUMENTS = ( { - 'name': 'connection_class', - 'values': [StringJoiningConnection, ListJoiningConnection] + "name": "connection_class", + "values": [StringJoiningConnection, ListJoiningConnection], }, { - 'name': 'value_size', - 'values': [10, 100, 1000, 10000, 100000, 1000000, 10000000, - 100000000] + "name": "value_size", + "values": [10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000], }, ) @@ -94,9 +94,9 @@ def setup(self, connection_class, value_size): def run(self, connection_class, value_size): r = self.get_client() - x = 'a' * value_size - r.set('benchmark', x) + x = "a" * value_size + r.set("benchmark", x) -if __name__ == '__main__': +if __name__ == "__main__": CommandPackerBenchmark().run_benchmark() diff --git a/benchmarks/socket_read_size.py b/benchmarks/socket_read_size.py index 72a1b0a7e3..37ffa97812 100644 --- a/benchmarks/socket_read_size.py +++ b/benchmarks/socket_read_size.py @@ -1,34 +1,26 @@ -from redis.connection import PythonParser, HiredisParser from base import Benchmark +from 
redis.connection import PythonParser, _HiredisParser -class SocketReadBenchmark(Benchmark): +class SocketReadBenchmark(Benchmark): ARGUMENTS = ( + {"name": "parser", "values": [PythonParser, _HiredisParser]}, { - 'name': 'parser', - 'values': [PythonParser, HiredisParser] + "name": "value_size", + "values": [10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000], }, - { - 'name': 'value_size', - 'values': [10, 100, 1000, 10000, 100000, 1000000, 10000000, - 100000000] - }, - { - 'name': 'read_size', - 'values': [4096, 8192, 16384, 32768, 65536, 131072] - } + {"name": "read_size", "values": [4096, 8192, 16384, 32768, 65536, 131072]}, ) def setup(self, value_size, read_size, parser): - r = self.get_client(parser_class=parser, - socket_read_size=read_size) - r.set('benchmark', 'a' * value_size) + r = self.get_client(parser_class=parser, socket_read_size=read_size) + r.set("benchmark", "a" * value_size) def run(self, value_size, read_size, parser): r = self.get_client() - r.get('benchmark') + r.get("benchmark") -if __name__ == '__main__': +if __name__ == "__main__": SocketReadBenchmark().run_benchmark() diff --git a/build_tools/.bash_profile b/build_tools/.bash_profile deleted file mode 100644 index b023cf70a7..0000000000 --- a/build_tools/.bash_profile +++ /dev/null @@ -1 +0,0 @@ -PATH=$PATH:/var/lib/redis/bin diff --git a/build_tools/bootstrap.sh b/build_tools/bootstrap.sh deleted file mode 100755 index a5a0d2ce83..0000000000 --- a/build_tools/bootstrap.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/usr/bin/env bash - -# need make to build redis -sudo apt-get install make diff --git a/build_tools/build_redis.sh b/build_tools/build_redis.sh deleted file mode 100755 index 379c6cc936..0000000000 --- a/build_tools/build_redis.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env bash - -source /home/vagrant/redis-py/build_tools/redis_vars.sh - -pushd /home/vagrant - -uninstall_all_sentinel_instances -uninstall_all_redis_instances - -# create a clean directory for redis -rm -rf $REDIS_DIR -mkdir -p $REDIS_BIN_DIR -mkdir -p $REDIS_CONF_DIR -mkdir -p $REDIS_SAVE_DIR - -# download, unpack and build redis -mkdir -p $REDIS_DOWNLOAD_DIR -cd $REDIS_DOWNLOAD_DIR -rm -f $REDIS_PACKAGE -rm -rf $REDIS_BUILD_DIR -wget http://download.redis.io/releases/$REDIS_PACKAGE -tar zxvf $REDIS_PACKAGE -cd $REDIS_BUILD_DIR -make -cp src/redis-server $REDIS_DIR/bin -cp src/redis-cli $REDIS_DIR/bin -cp src/redis-sentinel $REDIS_DIR/bin - -popd diff --git a/build_tools/install_redis.sh b/build_tools/install_redis.sh deleted file mode 100755 index fd53a1ca88..0000000000 --- a/build_tools/install_redis.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env bash - -source /home/vagrant/redis-py/build_tools/redis_vars.sh - -for filename in `ls $VAGRANT_REDIS_CONF_DIR`; do - # cuts the order prefix off of the filename, e.g. 
001-master -> master - PROCESS_NAME=redis-`echo $filename | cut -f 2- -d -` - echo "======================================" - echo "INSTALLING REDIS SERVER: $PROCESS_NAME" - echo "======================================" - - # make sure the instance is uninstalled (it should be already) - uninstall_instance $PROCESS_NAME - - # base config - mkdir -p $REDIS_CONF_DIR - cp $REDIS_BUILD_DIR/redis.conf $REDIS_CONF_DIR/$PROCESS_NAME.conf - # override config values from file - cat $VAGRANT_REDIS_CONF_DIR/$filename >> $REDIS_CONF_DIR/$PROCESS_NAME.conf - - # replace placeholder variables in init.d script - cp $VAGRANT_DIR/redis_init_script /etc/init.d/$PROCESS_NAME - sed -i "s/{{ PROCESS_NAME }}/$PROCESS_NAME/g" /etc/init.d/$PROCESS_NAME - # need to read the config file to find out what port this instance will run on - port=`grep port $VAGRANT_REDIS_CONF_DIR/$filename | cut -f 2 -d " "` - sed -i "s/{{ PORT }}/$port/g" /etc/init.d/$PROCESS_NAME - chmod 755 /etc/init.d/$PROCESS_NAME - - # and tell update-rc.d about it - update-rc.d $PROCESS_NAME defaults 98 - - # save the $PROCESS_NAME into installed instances file - echo $PROCESS_NAME >> $REDIS_INSTALLED_INSTANCES_FILE - - # start redis - /etc/init.d/$PROCESS_NAME start -done diff --git a/build_tools/install_sentinel.sh b/build_tools/install_sentinel.sh deleted file mode 100755 index 0597208ccf..0000000000 --- a/build_tools/install_sentinel.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env bash - -source /home/vagrant/redis-py/build_tools/redis_vars.sh - -for filename in `ls $VAGRANT_SENTINEL_CONF_DIR`; do - # cuts the order prefix off of the filename, e.g. 001-master -> master - PROCESS_NAME=sentinel-`echo $filename | cut -f 2- -d -` - echo "=========================================" - echo "INSTALLING SENTINEL SERVER: $PROCESS_NAME" - echo "=========================================" - - # make sure the instance is uninstalled (it should be already) - uninstall_instance $PROCESS_NAME - - # base config - mkdir -p $REDIS_CONF_DIR - cp $REDIS_BUILD_DIR/sentinel.conf $REDIS_CONF_DIR/$PROCESS_NAME.conf - # override config values from file - cat $VAGRANT_SENTINEL_CONF_DIR/$filename >> $REDIS_CONF_DIR/$PROCESS_NAME.conf - - # replace placeholder variables in init.d script - cp $VAGRANT_DIR/sentinel_init_script /etc/init.d/$PROCESS_NAME - sed -i "s/{{ PROCESS_NAME }}/$PROCESS_NAME/g" /etc/init.d/$PROCESS_NAME - # need to read the config file to find out what port this instance will run on - port=`grep port $VAGRANT_SENTINEL_CONF_DIR/$filename | cut -f 2 -d " "` - sed -i "s/{{ PORT }}/$port/g" /etc/init.d/$PROCESS_NAME - chmod 755 /etc/init.d/$PROCESS_NAME - - # and tell update-rc.d about it - update-rc.d $PROCESS_NAME defaults 99 - - # save the $PROCESS_NAME into installed instances file - echo $PROCESS_NAME >> $SENTINEL_INSTALLED_INSTANCES_FILE - - # start redis - /etc/init.d/$PROCESS_NAME start -done diff --git a/build_tools/redis-configs/001-master b/build_tools/redis-configs/001-master deleted file mode 100644 index 8591f1a61e..0000000000 --- a/build_tools/redis-configs/001-master +++ /dev/null @@ -1,8 +0,0 @@ -pidfile /var/run/redis-master.pid -bind * -port 6379 -daemonize yes -unixsocket /tmp/redis_master.sock -unixsocketperm 777 -dbfilename master.rdb -dir /var/lib/redis/backups diff --git a/build_tools/redis-configs/002-slave b/build_tools/redis-configs/002-slave deleted file mode 100644 index 13eb77ec4d..0000000000 --- a/build_tools/redis-configs/002-slave +++ /dev/null @@ -1,10 +0,0 @@ -pidfile /var/run/redis-slave.pid -bind * -port 6380 
-daemonize yes -unixsocket /tmp/redis-slave.sock -unixsocketperm 777 -dbfilename slave.rdb -dir /var/lib/redis/backups - -slaveof 127.0.0.1 6379 diff --git a/build_tools/redis_init_script b/build_tools/redis_init_script deleted file mode 100755 index 04cb2dbc7c..0000000000 --- a/build_tools/redis_init_script +++ /dev/null @@ -1,49 +0,0 @@ -#!/bin/sh - -### BEGIN INIT INFO -# Provides: redis-server -# Required-Start: $syslog -# Required-Stop: $syslog -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -# Short-Description: Start redis-server at boot time -# Description: Control redis-server. -### END INIT INFO - -REDISPORT={{ PORT }} -PIDFILE=/var/run/{{ PROCESS_NAME }}.pid -CONF=/var/lib/redis/conf/{{ PROCESS_NAME }}.conf - -EXEC=/var/lib/redis/bin/redis-server -CLIEXEC=/var/lib/redis/bin/redis-cli - -case "$1" in - start) - if [ -f $PIDFILE ] - then - echo "$PIDFILE exists, process is already running or crashed" - else - echo "Starting Redis server..." - $EXEC $CONF - fi - ;; - stop) - if [ ! -f $PIDFILE ] - then - echo "$PIDFILE does not exist, process is not running" - else - PID=$(cat $PIDFILE) - echo "Stopping ..." - $CLIEXEC -p $REDISPORT shutdown - while [ -x /proc/${PID} ] - do - echo "Waiting for Redis to shutdown ..." - sleep 1 - done - echo "Redis stopped" - fi - ;; - *) - echo "Please use start or stop as first argument" - ;; -esac diff --git a/build_tools/redis_vars.sh b/build_tools/redis_vars.sh deleted file mode 100755 index c52dd4cf37..0000000000 --- a/build_tools/redis_vars.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env bash - -VAGRANT_DIR=/home/vagrant/redis-py/build_tools -VAGRANT_REDIS_CONF_DIR=$VAGRANT_DIR/redis-configs -VAGRANT_SENTINEL_CONF_DIR=$VAGRANT_DIR/sentinel-configs -REDIS_VERSION=3.2.0 -REDIS_DOWNLOAD_DIR=/home/vagrant/redis-downloads -REDIS_PACKAGE=redis-$REDIS_VERSION.tar.gz -REDIS_BUILD_DIR=$REDIS_DOWNLOAD_DIR/redis-$REDIS_VERSION -REDIS_DIR=/var/lib/redis -REDIS_BIN_DIR=$REDIS_DIR/bin -REDIS_CONF_DIR=$REDIS_DIR/conf -REDIS_SAVE_DIR=$REDIS_DIR/backups -REDIS_INSTALLED_INSTANCES_FILE=$REDIS_DIR/redis-instances -SENTINEL_INSTALLED_INSTANCES_FILE=$REDIS_DIR/sentinel-instances - -function uninstall_instance() { - # Expects $1 to be the init.d filename, e.g. 
redis-nodename or - # sentinel-nodename - - if [ -a /etc/init.d/$1 ]; then - - echo "======================================" - echo "UNINSTALLING REDIS SERVER: $1" - echo "======================================" - - /etc/init.d/$1 stop - update-rc.d -f $1 remove - rm -f /etc/init.d/$1 - fi; - rm -f $REDIS_CONF_DIR/$1.conf -} - -function uninstall_all_redis_instances() { - if [ -a $REDIS_INSTALLED_INSTANCES_FILE ]; then - cat $REDIS_INSTALLED_INSTANCES_FILE | while read line; do - uninstall_instance $line; - done; - fi -} - -function uninstall_all_sentinel_instances() { - if [ -a $SENTINEL_INSTALLED_INSTANCES_FILE ]; then - cat $SENTINEL_INSTALLED_INSTANCES_FILE | while read line; do - uninstall_instance $line; - done; - fi -} diff --git a/build_tools/sentinel-configs/001-1 b/build_tools/sentinel-configs/001-1 deleted file mode 100644 index eccc3d1f84..0000000000 --- a/build_tools/sentinel-configs/001-1 +++ /dev/null @@ -1,6 +0,0 @@ -pidfile /var/run/sentinel-1.pid -port 26379 -daemonize yes - -# short timeout for sentinel tests -sentinel down-after-milliseconds mymaster 500 diff --git a/build_tools/sentinel-configs/002-2 b/build_tools/sentinel-configs/002-2 deleted file mode 100644 index 0cd28019c4..0000000000 --- a/build_tools/sentinel-configs/002-2 +++ /dev/null @@ -1,6 +0,0 @@ -pidfile /var/run/sentinel-2.pid -port 26380 -daemonize yes - -# short timeout for sentinel tests -sentinel down-after-milliseconds mymaster 500 diff --git a/build_tools/sentinel-configs/003-3 b/build_tools/sentinel-configs/003-3 deleted file mode 100644 index c7f4fcd335..0000000000 --- a/build_tools/sentinel-configs/003-3 +++ /dev/null @@ -1,6 +0,0 @@ -pidfile /var/run/sentinel-3.pid -port 26381 -daemonize yes - -# short timeout for sentinel tests -sentinel down-after-milliseconds mymaster 500 diff --git a/build_tools/sentinel_init_script b/build_tools/sentinel_init_script deleted file mode 100755 index 1d94804e9c..0000000000 --- a/build_tools/sentinel_init_script +++ /dev/null @@ -1,49 +0,0 @@ -#!/bin/sh - -### BEGIN INIT INFO -# Provides: redis-sentintel -# Required-Start: $syslog -# Required-Stop: $syslog -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -# Short-Description: Start redis-sentinel at boot time -# Description: Control redis-sentinel. -### END INIT INFO - -SENTINELPORT={{ PORT }} -PIDFILE=/var/run/{{ PROCESS_NAME }}.pid -CONF=/var/lib/redis/conf/{{ PROCESS_NAME }}.conf - -EXEC=/var/lib/redis/bin/redis-sentinel -CLIEXEC=/var/lib/redis/bin/redis-cli - -case "$1" in - start) - if [ -f $PIDFILE ] - then - echo "$PIDFILE exists, process is already running or crashed" - else - echo "Starting Redis Sentinel..." - $EXEC $CONF - fi - ;; - stop) - if [ ! -f $PIDFILE ] - then - echo "$PIDFILE does not exist, process is not running" - else - PID=$(cat $PIDFILE) - echo "Stopping ..." - $CLIEXEC -p $SENTINELPORT shutdown - while [ -x /proc/${PID} ] - do - echo "Waiting for Sentinel to shutdown ..." 
- sleep 1 - done - echo "Sentinel stopped" - fi - ;; - *) - echo "Please use start or stop as first argument" - ;; -esac diff --git a/codecov.yml b/codecov.yml new file mode 100644 index 0000000000..449ec0c50f --- /dev/null +++ b/codecov.yml @@ -0,0 +1,13 @@ +ignore: + - "benchmarks/**" + - "tasks.py" + +codecov: + require_ci_to_pass: yes + +coverage: + precision: 2 + round: down + range: "80...100" + status: + patch: off # off for now as it yells about everything diff --git a/dev_requirements.txt b/dev_requirements.txt new file mode 100644 index 0000000000..0f6be6e848 --- /dev/null +++ b/dev_requirements.txt @@ -0,0 +1,32 @@ +build +build==1.2.2.post1 ; platform_python_implementation == "PyPy" +click==8.0.4 +invoke==2.2.0 +mock +mock==5.1.0 ; platform_python_implementation == "PyPy" +packaging>=20.4 +packaging==24.2 ; platform_python_implementation == "PyPy" + +pytest +pytest==8.3.4 ; platform_python_implementation == "PyPy" +pytest-asyncio>=0.23.0 +pytest-asyncio==1.1.0 ; platform_python_implementation == "PyPy" +pytest-cov +coverage<7.11.1 +pytest-cov==6.0.0 ; platform_python_implementation == "PyPy" +coverage==7.6.12 ; platform_python_implementation == "PyPy" +pytest-profiling==1.8.1 +pytest-timeout +pytest-timeout==2.3.1 ; platform_python_implementation == "PyPy" + +ruff==0.9.6 +ujson>=4.2.0 +uvloop<=0.21.0; platform_python_implementation == "CPython" and python_version < "3.14" +uvloop>=0.22; platform_python_implementation == "CPython" and python_version >= "3.14" +vulture>=2.3.0 + +numpy>=1.24.0 ; platform_python_implementation == "CPython" +numpy>=1.24.0,<2.0 ; platform_python_implementation == "PyPy" + +redis-entraid==1.0.0 +pybreaker>=1.4.0 diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000000..1699cf61af --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,108 @@ +--- +# image tag 8.0-RC2-pre is the one matching the 8.0 GA release +x-client-libs-stack-image: &client-libs-stack-image + image: "redislabs/client-libs-test:${CLIENT_LIBS_TEST_STACK_IMAGE_TAG:-8.4-RC1-pre.2}" + +x-client-libs-image: &client-libs-image + image: "redislabs/client-libs-test:${CLIENT_LIBS_TEST_IMAGE_TAG:-8.4-RC1-pre.2}" + +services: + + redis: + <<: *client-libs-image + container_name: redis-standalone + environment: + - TLS_ENABLED=yes + - REDIS_CLUSTER=no + - PORT=6379 + - TLS_PORT=6666 + command: ${REDIS_EXTRA_ARGS:---enable-debug-command yes --enable-module-command yes --tls-auth-clients optional --save ""} + ports: + - 6379:6379 + - 6666:6666 # TLS port + volumes: + - "./dockers/standalone:/redis/work" + profiles: + - standalone + - sentinel + - replica + - all-stack + - all + + replica: + <<: *client-libs-image + container_name: redis-replica + depends_on: + - redis + environment: + - TLS_ENABLED=no + - REDIS_CLUSTER=no + - PORT=6380 + command: ${REDIS_EXTRA_ARGS:---enable-debug-command yes --replicaof redis 6379 --protected-mode no --save ""} + ports: + - 6380:6380 + volumes: + - "./dockers/replica:/redis/work" + profiles: + - replica + - all-stack + - all + + cluster: + <<: *client-libs-image + container_name: redis-cluster + environment: + - REDIS_CLUSTER=yes + - NODES=6 + - REPLICAS=1 + - TLS_ENABLED=yes + - PORT=16379 + - TLS_PORT=27379 + command: ${REDIS_EXTRA_ARGS:---enable-debug-command yes --enable-module-command yes --tls-auth-clients optional --save "" --tls-cluster yes} + ports: + - "16379-16384:16379-16384" + - "27379-27384:27379-27384" + volumes: + - "./dockers/cluster:/redis/work" + profiles: + - cluster + - all-stack + - all + + sentinel: + <<: 
*client-libs-image + container_name: redis-sentinel + depends_on: + - redis + environment: + - REDIS_CLUSTER=no + - NODES=3 + - PORT=26379 + command: ${REDIS_EXTRA_ARGS:---sentinel} + ports: + - 26379:26379 + - 26380:26380 + - 26381:26381 + volumes: + - "./dockers/sentinel.conf:/redis/config-default/redis.conf" + - "./dockers/sentinel:/redis/work" + profiles: + - sentinel + - all-stack + - all + + redis-stack: + <<: *client-libs-stack-image + container_name: redis-stack + environment: + - REDIS_CLUSTER=no + - PORT=6379 + command: ${REDIS_EXTRA_ARGS:---enable-debug-command yes --enable-module-command yes --save ""} + ports: + - 6479:6379 + volumes: + - "./dockers/redis-stack:/redis/work" + profiles: + - standalone + - all-stack + - all diff --git a/dockers/sentinel.conf b/dockers/sentinel.conf new file mode 100644 index 0000000000..817a528765 --- /dev/null +++ b/dockers/sentinel.conf @@ -0,0 +1,8 @@ +sentinel resolve-hostnames yes +sentinel monitor redis-py-test redis 6379 2 +# Be much more tolerant to transient stalls (index builds, GC, I/O) +sentinel down-after-milliseconds redis-py-test 60000 +# Avoid rapid repeated failover attempts +sentinel failover-timeout redis-py-test 180000 +# Keep it conservative: sync one replica at a time +sentinel parallel-syncs redis-py-test 1 \ No newline at end of file diff --git a/docs/_static/logo-redis.svg b/docs/_static/logo-redis.svg new file mode 100644 index 0000000000..a8de68d23c --- /dev/null +++ b/docs/_static/logo-redis.svg @@ -0,0 +1,10 @@ + + + + + + + + + + diff --git a/docs/advanced_features.rst b/docs/advanced_features.rst new file mode 100644 index 0000000000..11ab8af716 --- /dev/null +++ b/docs/advanced_features.rst @@ -0,0 +1,557 @@ +Advanced Features +================= + +A note about threading +---------------------- + +Redis client instances can safely be shared between threads. Internally, +connection instances are only retrieved from the connection pool during +command execution, and returned to the pool directly after. Command +execution never modifies state on the client instance. + +However, there is one caveat: the Redis SELECT command. The SELECT +command allows you to switch the database currently in use by the +connection. That database remains selected until another is selected or +until the connection is closed. This creates an issue in that +connections could be returned to the pool that are connected to a +different database. + +As a result, redis-py does not implement the SELECT command on client +instances. If you use multiple Redis databases within the same +application, you should create a separate client instance (and possibly +a separate connection pool) for each database. + +It is not safe to pass PubSub or Pipeline objects between threads. + +Pipelines +--------- + +Default pipelines +~~~~~~~~~~~~~~~~~ + +Pipelines are a subclass of the base Redis class that provide support +for buffering multiple commands to the server in a single request. They +can be used to dramatically increase the performance of groups of +commands by reducing the number of back-and-forth TCP packets between +the client and server. + +Pipelines are quite simple to use: + +.. code:: python + + >>> r = redis.Redis(...) 
+   >>> r.set('bing', 'baz')
+   >>> # Use the pipeline() method to create a pipeline instance
+   >>> pipe = r.pipeline()
+   >>> # The following SET commands are buffered
+   >>> pipe.set('foo', 'bar')
+   >>> pipe.get('bing')
+   >>> # the EXECUTE call sends all buffered commands to the server, returning
+   >>> # a list of responses, one for each command.
+   >>> pipe.execute()
+   [True, b'baz']
+
+For ease of use, all commands being buffered into the pipeline return
+the pipeline object itself. Therefore calls can be chained like:
+
+.. code:: python
+
+   >>> pipe.set('foo', 'bar').sadd('faz', 'baz').incr('auto_number').execute()
+   [True, True, 6]
+
+In addition, pipelines can also ensure the buffered commands are
+executed atomically as a group. This happens by default. If you want to
+disable the atomic nature of a pipeline but still want to buffer
+commands, you can turn off transactions.
+
+.. code:: python
+
+   >>> pipe = r.pipeline(transaction=False)
+
+A common issue occurs when requiring atomic transactions but needing to
+retrieve values in Redis beforehand for use within the transaction. For
+instance, let's assume that the INCR command didn't exist and we need to
+build an atomic version of INCR in Python.
+
+The completely naive implementation could GET the value, increment it in
+Python, and SET the new value back. However, this is not atomic because
+multiple clients could be doing this at the same time, each getting the
+same value from GET.
+
+Enter the WATCH command. WATCH provides the ability to monitor one or
+more keys prior to starting a transaction. If any of those keys change
+prior to the execution of that transaction, the entire transaction will
+be canceled and a WatchError will be raised. To implement our own
+client-side INCR command, we could do something like this:
+
+.. code:: python
+
+   >>> with r.pipeline() as pipe:
+   ...     while True:
+   ...         try:
+   ...             # put a WATCH on the key that holds our sequence value
+   ...             pipe.watch('OUR-SEQUENCE-KEY')
+   ...             # after WATCHing, the pipeline is put into immediate execution
+   ...             # mode until we tell it to start buffering commands again.
+   ...             # this allows us to get the current value of our sequence
+   ...             current_value = pipe.get('OUR-SEQUENCE-KEY')
+   ...             next_value = int(current_value) + 1
+   ...             # now we can put the pipeline back into buffered mode with MULTI
+   ...             pipe.multi()
+   ...             pipe.set('OUR-SEQUENCE-KEY', next_value)
+   ...             # and finally, execute the pipeline (the set command)
+   ...             pipe.execute()
+   ...             # if a WatchError wasn't raised during execution, everything
+   ...             # we just did happened atomically.
+   ...             break
+   ...         except WatchError:
+   ...             # another client must have changed 'OUR-SEQUENCE-KEY' between
+   ...             # the time we started WATCHing it and the pipeline's execution.
+   ...             # our best bet is to just retry.
+   ...             continue
+
+Note that, because the Pipeline must bind to a single connection for the
+duration of a WATCH, care must be taken to ensure that the connection is
+returned to the connection pool by calling the reset() method. If the
+Pipeline is used as a context manager (as in the example above), reset()
+will be called automatically. Of course you can do this the manual way
+by explicitly calling reset():
+
+.. code:: python
+
+   >>> pipe = r.pipeline()
+   >>> while True:
+   ...     try:
+   ...         pipe.watch('OUR-SEQUENCE-KEY')
+   ...         ...
+   ...         pipe.execute()
+   ...         break
+   ...     except WatchError:
+   ...         continue
+   ...     finally:
+   ...         pipe.reset()
+
+A convenience method named "transaction" exists to handle all the
+boilerplate of watching keys and retrying on watch errors. It takes a
+callable that should expect a single parameter, a pipeline object, and
+any number of keys to be WATCHed. Our client-side INCR command above can
+be written like this, which is much easier to read:
+
+.. code:: python
+
+   >>> def client_side_incr(pipe):
+   ...     current_value = pipe.get('OUR-SEQUENCE-KEY')
+   ...     next_value = int(current_value) + 1
+   ...     pipe.multi()
+   ...     pipe.set('OUR-SEQUENCE-KEY', next_value)
+   >>>
+   >>> r.transaction(client_side_incr, 'OUR-SEQUENCE-KEY')
+   [True]
+
+Be sure to call pipe.multi() in the callable passed to Redis.transaction
+prior to any write commands.
+
+Pipelines in clusters
+~~~~~~~~~~~~~~~~~~~~~
+
+ClusterPipeline is a subclass of RedisCluster that provides support for
+Redis pipelines in cluster mode. When calling the execute() command, all
+the commands are grouped by the node on which they will be executed, and
+are then executed by the respective nodes in parallel. The pipeline
+instance will wait for all the nodes to respond before returning the
+result to the caller. Command responses are returned as a list sorted in
+the same order in which they were sent. Pipelines can be used to
+dramatically increase the throughput of Redis Cluster by significantly
+reducing the number of network round trips between the client and
+the server.
+
+.. code:: python
+
+   >>> rc = RedisCluster()
+   >>> with rc.pipeline() as pipe:
+   ...     pipe.set('foo', 'value1')
+   ...     pipe.set('bar', 'value2')
+   ...     pipe.get('foo')
+   ...     pipe.get('bar')
+   ...     print(pipe.execute())
+   [True, True, b'value1', b'value2']
+   ...     pipe.set('foo1', 'bar1').get('foo1').execute()
+   [True, b'bar1']
+
+Please note:
+
+- RedisCluster pipelines currently only support key-based commands.
+- The pipeline inherits its ‘load_balancing_strategy’ value from the
+  cluster instance. Thus, if reading from replicas is enabled in
+  the cluster instance, the pipeline will also direct read commands to
+  replicas.
+
+
+Transactions in clusters
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Transactions are supported in cluster mode with one caveat: all keys of
+all commands issued on a transaction pipeline must reside on the
+same slot. This is similar to the limitation on multi-key commands in
+a cluster. The reason behind this is that the Redis engine does not offer
+a mechanism to block or exchange key data across nodes on the fly. A
+client may add some logic to abstract engine limitations when running
+on a cluster, such as the pipeline behavior explained in the previous
+section, but there is no simple way that a client can enforce atomicity
+across nodes on a distributed system.
+
+The compromise of limiting the transaction pipeline to same-slot keys
+is exactly that: a compromise. While this behavior is different from
+non-transactional cluster pipelines, it simplifies migration of clients
+from standalone to cluster under some circumstances. Note that application
+code that issues multi/exec commands on a standalone client without
+embedding them within a pipeline would eventually get ‘AttributeError’.
+With this approach, if the application uses ‘client.pipeline(transaction=True)’,
+then swapping the client for a cluster-aware instance would simplify
+code changes (to some extent). This may be true for application code that
+makes use of hash tags, since its transactions may already be
+mapping all commands to the same slot.
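+
+As a rough sketch of that migration point, the same transactional block
+can run unchanged against either a standalone client or a cluster-aware
+client, provided all keys hash to the same slot (the `client` variable
+and key names below are only illustrative):
+
+.. code:: python
+
+   >>> # client = redis.Redis(...) or client = RedisCluster(...)
+   >>> with client.pipeline(transaction=True) as pipe:
+   ...     # the '{user:1}' hash tag keeps both keys on one slot in cluster mode
+   ...     pipe.set('{user:1}:name', 'alice')
+   ...     pipe.set('{user:1}:visits', 1)
+   ...     pipe.execute()
+   [True, True]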
+
+An alternative is some kind of two-step commit solution, where a slot
+validation is run before the actual commands are run. This could work
+with controlled node maintenance but does not cover single node failures.
+
+Given the cluster limitations for transactions, pipelines are not in
+transactional mode by default. To enable a transactional context, set:
+
+.. code:: python
+
+   >>> p = rc.pipeline(transaction=True)
+
+After entering the transactional context, you can add commands to it in
+one of the following ways:
+
+.. code:: python
+
+   >>> p = rc.pipeline(transaction=True)  # Chaining commands
+   >>> p.set("key", "value")
+   >>> p.get("key")
+   >>> response = p.execute()
+
+Or
+
+.. code:: python
+
+   >>> with rc.pipeline(transaction=True) as pipe:  # Using context manager
+   ...     pipe.set("key", "value")
+   ...     pipe.get("key")
+   ...     response = pipe.execute()
+
+As you can see, there's no need to explicitly send `MULTI/EXEC` commands
+to mark the start and end of the context; `ClusterPipeline` takes care
+of it.
+
+To ensure that different keys are mapped to the same hash slot on the
+server side, prepend your keys with the same hash tag, a technique that
+allows you to control key distribution.
+More information `here `_
+
+.. code:: python
+
+   >>> with rc.pipeline(transaction=True) as pipe:
+   ...     pipe.set("{tag}foo", "bar")
+   ...     pipe.set("{tag}bar", "foo")
+   ...     pipe.get("{tag}foo")
+   ...     pipe.get("{tag}bar")
+   ...     response = pipe.execute()
+
+CAS Transactions
+~~~~~~~~~~~~~~~~
+
+If you want to apply optimistic locking to certain keys, you have to
+execute the `WATCH` command in a transactional context. The `WATCH`
+command is subject to the same limitation as any other multi-key
+command: all keys must be mapped to the same hash slot.
+
+However, the difference between a CAS transaction and a normal one is
+that you have to explicitly call the MULTI command to indicate the start
+of the transactional context. The WATCH command itself and any
+subsequent commands before MULTI are executed immediately on the server
+side, so you can apply optimistic locking and fetch the necessary data
+before the transaction executes.
+
+.. code:: python
+
+   >>> with rc.pipeline(transaction=True) as pipe:
+   ...     pipe.watch("mykey")  # Apply locking by immediately executing command
+   ...     val = pipe.get("mykey")  # Immediately retrieves value
+   ...     val = int(val) + 1  # Increment value (GET returns bytes)
+   ...     pipe.multi()  # Starting transaction context
+   ...     pipe.set("mykey", val)  # Command will be pipelined
+   ...     response = pipe.execute()  # Returns OK or None if key was modified in the meantime
+
+
+Publish / Subscribe
+-------------------
+
+redis-py includes a PubSub object that subscribes to channels and
+listens for new messages. Creating a PubSub object is easy.
+
+.. code:: python
+
+   >>> r = redis.Redis(...)
+   >>> p = r.pubsub()
+
+Once a PubSub instance is created, channels and patterns can be
+subscribed to.
+
+.. code:: python
+
+   >>> p.subscribe('my-first-channel', 'my-second-channel', ...)
+   >>> p.psubscribe('my-*', ...)
+
+The PubSub instance is now subscribed to those channels/patterns. The
+subscription confirmations can be seen by reading messages from the
+PubSub instance.
+
+.. code:: python
+
+   >>> p.get_message()
+   {'pattern': None, 'type': 'subscribe', 'channel': b'my-second-channel', 'data': 1}
+   >>> p.get_message()
+   {'pattern': None, 'type': 'subscribe', 'channel': b'my-first-channel', 'data': 2}
+   >>> p.get_message()
+   {'pattern': None, 'type': 'psubscribe', 'channel': b'my-*', 'data': 3}
+
+Every message read from a PubSub instance will be a dictionary with the
+following keys.
+
+- **type**: One of the following: 'subscribe', 'unsubscribe',
+  'psubscribe', 'punsubscribe', 'message', 'pmessage'
+- **channel**: The channel [un]subscribed to or the channel a message
+  was published to
+- **pattern**: The pattern that matched a published message's channel.
+  Will be None in all cases except for 'pmessage' types.
+- **data**: The message data. With [un]subscribe messages, this value
+  will be the number of channels and patterns the connection is
+  currently subscribed to. With [p]message messages, this value will be
+  the actual published message.
+
+Let's send a message now.
+
+.. code:: python
+
+   # the publish method returns the number of matching channel and
+   # pattern subscriptions. 'my-first-channel' matches both the
+   # 'my-first-channel' subscription and the 'my-*' pattern subscription,
+   # so this message will be delivered to 2 channels/patterns
+   >>> r.publish('my-first-channel', 'some data')
+   2
+   >>> p.get_message()
+   {'channel': b'my-first-channel', 'data': b'some data', 'pattern': None, 'type': 'message'}
+   >>> p.get_message()
+   {'channel': b'my-first-channel', 'data': b'some data', 'pattern': b'my-*', 'type': 'pmessage'}
+
+Unsubscribing works just like subscribing. If no arguments are passed to
+[p]unsubscribe, all channels or patterns will be unsubscribed from.
+
+.. code:: python
+
+   >>> p.unsubscribe()
+   >>> p.punsubscribe('my-*')
+   >>> p.get_message()
+   {'channel': b'my-second-channel', 'data': 2, 'pattern': None, 'type': 'unsubscribe'}
+   >>> p.get_message()
+   {'channel': b'my-first-channel', 'data': 1, 'pattern': None, 'type': 'unsubscribe'}
+   >>> p.get_message()
+   {'channel': b'my-*', 'data': 0, 'pattern': None, 'type': 'punsubscribe'}
+
+redis-py also allows you to register callback functions to handle
+published messages. Message handlers take a single argument, the
+message, which is a dictionary just like the examples above. To
+subscribe to a channel or pattern with a message handler, pass the
+channel or pattern name as a keyword argument with its value being the
+callback function.
+
+When a message is read on a channel or pattern with a message handler,
+the message dictionary is created and passed to the message handler. In
+this case, a None value is returned from get_message() since the message
+was already handled.
+
+.. code:: python
+
+   >>> def my_handler(message):
+   ...     print('MY HANDLER: ', message['data'])
+   >>> p.subscribe(**{'my-channel': my_handler})
+   # read the subscribe confirmation message
+   >>> p.get_message()
+   {'pattern': None, 'type': 'subscribe', 'channel': b'my-channel', 'data': 1}
+   >>> r.publish('my-channel', 'awesome data')
+   1
+   # for the message handler to work, we need to tell the instance to
+   # read data. this can be done in several ways (read more below). we'll
+   # just use the familiar get_message() function for now
+   >>> message = p.get_message()
+   MY HANDLER: awesome data
+   # note here that the my_handler callback printed the string above.
+   # `message` is None because the message was handled by our handler.
+   >>> print(message)
+   None
+
+If your application is not interested in the (sometimes noisy)
+subscribe/unsubscribe confirmation messages, you can ignore them by
+passing ignore_subscribe_messages=True to r.pubsub(). This will cause
+all subscribe/unsubscribe messages to be read, but they won't bubble up
+to your application.
+
+.. code:: python
+
+   >>> p = r.pubsub(ignore_subscribe_messages=True)
+   >>> p.subscribe('my-channel')
+   >>> p.get_message()  # hides the subscribe message and returns None
+   >>> r.publish('my-channel', 'my data')
+   1
+   >>> p.get_message()
+   {'channel': b'my-channel', 'data': b'my data', 'pattern': None, 'type': 'message'}
+
+There are three different strategies for reading messages.
+
+The examples above have been using pubsub.get_message(). Behind the
+scenes, get_message() uses the system's 'select' module to quickly poll
+the connection's socket. If there's data available to be read,
+get_message() will read it, format the message and return it or pass it
+to a message handler. If there's no data to be read, get_message() will
+immediately return None. This makes it trivial to integrate into an
+existing event loop inside your application.
+
+.. code:: python
+
+   >>> while True:
+   >>>     message = p.get_message()
+   >>>     if message:
+   >>>         # do something with the message
+   >>>     time.sleep(0.001)  # be nice to the system :)
+
+Older versions of redis-py only read messages with pubsub.listen().
+listen() is a generator that blocks until a message is available. If
+your application doesn't need to do anything else but receive and act on
+messages received from redis, listen() is an easy way to get up and
+running.
+
+.. code:: python
+
+   >>> for message in p.listen():
+   ...     # do something with the message
+
+The third option runs an event loop in a separate thread.
+pubsub.run_in_thread() creates a new thread and starts the event loop.
+The thread object is returned to the caller of run_in_thread(). The
+caller can use the thread.stop() method to shut down the event loop and
+thread. Behind the scenes, this is simply a wrapper around get_message()
+that runs in a separate thread, essentially creating a tiny non-blocking
+event loop for you. run_in_thread() takes an optional sleep_time
+argument. If specified, the event loop will call time.sleep() with the
+value in each iteration of the loop.
+
+Note: Since we're running in a separate thread, there's no way to handle
+messages that aren't automatically handled with registered message
+handlers. Therefore, redis-py prevents you from calling run_in_thread()
+if you're subscribed to patterns or channels that don't have message
+handlers attached.
+
+.. code:: python
+
+   >>> p.subscribe(**{'my-channel': my_handler})
+   >>> thread = p.run_in_thread(sleep_time=0.001)
+   # the event loop is now running in the background processing messages
+   # when it's time to shut it down...
+   >>> thread.stop()
+
+run_in_thread also supports an optional exception handler, which lets
+you catch exceptions that occur within the worker thread and handle them
+appropriately. The exception handler will take as arguments the
+exception itself, the pubsub object, and the worker thread returned by
+run_in_thread.
+
+.. code:: python
+
+   >>> p.subscribe(**{'my-channel': my_handler})
+   >>> def exception_handler(ex, pubsub, thread):
+   >>>     print(ex)
+   >>>     thread.stop()
+   >>> thread = p.run_in_thread(exception_handler=exception_handler)
+
+A PubSub object adheres to the same encoding semantics as the client
+instance it was created from. Any channel or pattern that's unicode will
+be encoded using the encoding specified on the client before being sent
+to Redis. If the client's decode_responses flag is set to False (the
+default), the 'channel', 'pattern' and 'data' values in message
+dictionaries will be byte strings. If the client's decode_responses is
+True, then the 'channel', 'pattern' and 'data' values will be
+automatically decoded to unicode strings using the client's encoding.
+
+PubSub objects remember what channels and patterns they are subscribed
+to. In the event of a disconnection such as a network error or timeout,
+the PubSub object will re-subscribe to all prior channels and patterns
+when reconnecting. Messages that were published while the client was
+disconnected cannot be delivered. When you're finished with a PubSub
+object, call its .close() method to shut down the connection.
+
+.. code:: python
+
+   >>> p = r.pubsub()
+   >>> ...
+   >>> p.close()
+
+The PUBSUB set of subcommands CHANNELS, NUMSUB and NUMPAT are also
+supported:
+
+.. code:: python
+
+   >>> r.pubsub_channels()
+   [b'foo', b'bar']
+   >>> r.pubsub_numsub('foo', 'bar')
+   [(b'foo', 9001), (b'bar', 42)]
+   >>> r.pubsub_numsub('baz')
+   [(b'baz', 0)]
+   >>> r.pubsub_numpat()
+   1204
+
+Sharded pubsub
+~~~~~~~~~~~~~~
+
+`Sharded pubsub `_ is a feature introduced with Redis 7.0 and fully supported by redis-py as of 5.0. It helps scale the usage of pub/sub in cluster mode by having the cluster shard messages to nodes that own a slot for a shard channel. Here, the cluster ensures the published shard messages are forwarded to the appropriate nodes. Clients subscribe to a channel by connecting to either the master responsible for the slot, or any of its replicas.
+
+This makes use of the `SSUBSCRIBE `_ and `SPUBLISH `_ commands within Redis.
+
+The following is a simplified example:
+
+.. code:: python
+
+   >>> from redis.cluster import RedisCluster, ClusterNode
+   >>> r = RedisCluster(startup_nodes=[ClusterNode('localhost', 6379), ClusterNode('localhost', 6380)])
+   >>> p = r.pubsub()
+   >>> p.ssubscribe('foo')
+   >>> # assume someone sends a message along the channel via a publish
+   >>> message = p.get_sharded_message()
+
+Similarly, the same process can be used to acquire sharded pubsub messages that have already been sent to a specific node, by passing the node to get_sharded_message:
+
+.. code:: python
+
+   >>> from redis.cluster import RedisCluster, ClusterNode
+   >>> first_node = ClusterNode('localhost', 6379)
+   >>> second_node = ClusterNode('localhost', 6380)
+   >>> r = RedisCluster(startup_nodes=[first_node, second_node])
+   >>> p = r.pubsub()
+   >>> p.ssubscribe('foo')
+   >>> # assume someone sends a message along the channel via a publish
+   >>> message = p.get_sharded_message(target_node=second_node)
+
+
+Monitor
+~~~~~~~
+
+redis-py includes a Monitor object that streams every command processed
+by the Redis server. Use listen() on the Monitor object to block until a
+command is received.
+
+.. code:: python
+
+   >>> r = redis.Redis(...)
+   >>> with r.monitor() as m:
+   >>>     for command in m.listen():
+   >>>         print(command)
diff --git a/docs/backoff.rst b/docs/backoff.rst
new file mode 100644
index 0000000000..c5ab01ab03
--- /dev/null
+++ b/docs/backoff.rst
@@ -0,0 +1,7 @@
+.. _backoff-label:
+
+Backoff
+#############
+
+.. automodule:: redis.backoff
+   :members:
\ No newline at end of file
diff --git a/docs/clustering.rst b/docs/clustering.rst
new file mode 100644
index 0000000000..3c28b9ee16
--- /dev/null
+++ b/docs/clustering.rst
@@ -0,0 +1,245 @@
+Clustering
+==========
+
+redis-py now supports cluster mode and provides a client for `Redis
+Cluster `__.
+
+The cluster client is based on Grokzen’s
+`redis-py-cluster `__, has
+added bug fixes, and now supersedes that library. Support for these
+changes is thanks to his contributions.
+
+To learn more about Redis Cluster, see `Redis Cluster
+specifications `__.
+
+`Creating clusters <#creating-clusters>`__ \| `Specifying Target
+Nodes <#specifying-target-nodes>`__ \| `Multi-key
+Commands <#multi-key-commands>`__ \| `Known PubSub
+Limitations <#known-pubsub-limitations>`__
+
+Connecting to cluster
+---------------------
+
+Connecting redis-py to a Redis Cluster requires at a minimum a single
+node for cluster discovery. There are multiple ways in which a cluster
+instance can be created:
+
+- Using ‘host’ and ‘port’ arguments:
+
+.. code:: python
+
+   >>> from redis.cluster import RedisCluster as Redis
+   >>> rc = Redis(host='localhost', port=6379)
+   >>> print(rc.get_nodes())
+   [[host=127.0.0.1,port=6379,name=127.0.0.1:6379,server_type=primary,redis_connection=Redis>>], [host=127.0.0.1,port=6378,name=127.0.0.1:6378,server_type=primary,redis_connection=Redis>>], [host=127.0.0.1,port=6377,name=127.0.0.1:6377,server_type=replica,redis_connection=Redis>>]]
+
+- Using the Redis URL specification:
+
+.. code:: python
+
+   >>> from redis.cluster import RedisCluster as Redis
+   >>> rc = Redis.from_url("redis://localhost:6379/0")
+
+- Directly, via the ClusterNode class:
+
+.. code:: python
+
+   >>> from redis.cluster import RedisCluster as Redis
+   >>> from redis.cluster import ClusterNode
+   >>> nodes = [ClusterNode('localhost', 6379), ClusterNode('localhost', 6378)]
+   >>> rc = Redis(startup_nodes=nodes)
+
+When a RedisCluster instance is being created, it first attempts to
+establish a connection to one of the provided startup nodes. If none of
+the startup nodes are reachable, a ‘RedisClusterException’ will be
+thrown. After a connection to one of the cluster’s nodes is
+established, the RedisCluster instance will be initialized with 3
+caches: a slots cache which maps each of the 16384 slots to the node/s
+handling them, a nodes cache that contains ClusterNode objects (name,
+host, port, redis connection) for all of the cluster’s nodes, and a
+commands cache that contains all of the server-supported commands that
+were retrieved using the Redis ‘COMMAND’ output. See *RedisCluster
+specific options* below for more.
+
+A RedisCluster instance can be directly used to execute Redis commands.
+When a command is being executed through the cluster instance, the
+target node(s) will be internally determined. When using a key-based
+command, the target node will be the node that holds the key’s slot.
+Cluster management commands and other commands that are not key-based
+have a parameter called ‘target_nodes’ where you can specify which nodes
+to execute the command on. In the absence of target_nodes, the command
+will be executed on the default cluster node. As part of cluster
+instance initialization, the cluster’s default node is randomly selected
+from the cluster’s primaries, and will be updated upon reinitialization.
+Using r.get_default_node(), you can get the cluster’s default node, or
+you can change it using the ‘set_default_node’ method.
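+
+A minimal sketch of inspecting and overriding the default node (the
+host and port below are only illustrative):
+
+.. code:: python
+
+   >>> # which node currently serves non key-based commands?
+   >>> rc.get_default_node()
+   >>> # promote a specific node to be the default
+   >>> rc.set_default_node(rc.get_node('localhost', 6379))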
+
+The ‘target_nodes’ parameter is explained in the following section,
+‘Specifying Target Nodes’.
+
+.. code:: python
+
+   >>> # target-nodes: the node that holds 'foo1's key slot
+   >>> rc.set('foo1', 'bar1')
+   >>> # target-nodes: the node that holds 'foo2's key slot
+   >>> rc.set('foo2', 'bar2')
+   >>> # target-nodes: the node that holds 'foo1's key slot
+   >>> print(rc.get('foo1'))
+   b'bar1'
+   >>> # target-node: default-node
+   >>> print(rc.keys())
+   [b'foo1']
+   >>> # target-node: default-node
+   >>> rc.ping()
+
+Specifying Target Nodes
+-----------------------
+
+As mentioned above, all non key-based RedisCluster commands accept the
+kwarg parameter ‘target_nodes’ that specifies the node/nodes that the
+command should be executed on. The best practice is to specify target
+nodes using RedisCluster class’s node flags: PRIMARIES, REPLICAS,
+ALL_NODES, RANDOM. When a nodes flag is passed along with a command, it
+will be internally resolved to the relevant node/s. If the nodes
+topology of the cluster changes during the execution of a command, the
+client will be able to resolve the nodes flag again with the new
+topology and attempt to retry executing the command.
+
+.. code:: python
+
+   >>> from redis.cluster import RedisCluster as Redis
+   >>> # run cluster-meet command on all of the cluster's nodes
+   >>> rc.cluster_meet('127.0.0.1', 6379, target_nodes=Redis.ALL_NODES)
+   >>> # ping all replicas
+   >>> rc.ping(target_nodes=Redis.REPLICAS)
+   >>> # ping a random node
+   >>> rc.ping(target_nodes=Redis.RANDOM)
+   >>> # get the keys from all cluster nodes
+   >>> rc.keys(target_nodes=Redis.ALL_NODES)
+   [b'foo1', b'foo2']
+   >>> # execute bgsave in all primaries
+   >>> rc.bgsave(target_nodes=Redis.PRIMARIES)
+
+You could also pass ClusterNodes directly if you want to execute a
+command on a specific node / node group that isn’t addressed by the
+nodes flag. However, if the command execution fails due to cluster
+topology changes, a retry attempt will not be made, since the passed
+target node/s may no longer be valid, and the relevant cluster or
+connection error will be returned.
+
+.. code:: python
+
+   >>> node = rc.get_node('localhost', 6379)
+   >>> # Get the keys only for that specific node
+   >>> rc.keys(target_nodes=node)
+   >>> # get Redis info from a subset of primaries
+   >>> subset_primaries = [node for node in rc.get_primaries() if node.port > 6378]
+   >>> rc.info(target_nodes=subset_primaries)
+
+In addition, the RedisCluster instance can query the Redis instance of a
+specific node and execute commands on that node directly. The Redis
+client, however, does not handle cluster failures and retries.
+
+.. code:: python
+
+   >>> cluster_node = rc.get_node(host='localhost', port=6379)
+   >>> print(cluster_node)
+   [host=127.0.0.1,port=6379,name=127.0.0.1:6379,server_type=primary,redis_connection=Redis>>]
+   >>> r = cluster_node.redis_connection
+   >>> r.client_list()
+   [{'id': '276', 'addr': '127.0.0.1:64108', 'fd': '16', 'name': '', 'age': '0', 'idle': '0', 'flags': 'N', 'db': '0', 'sub': '0', 'psub': '0', 'multi': '-1', 'qbuf': '26', 'qbuf-free': '32742', 'argv-mem': '10', 'obl': '0', 'oll': '0', 'omem': '0', 'tot-mem': '54298', 'events': 'r', 'cmd': 'client', 'user': 'default'}]
+   >>> # Get the keys only for that specific node
+   >>> r.keys()
+   [b'foo1']
+
+Multi-key Commands
+------------------
+
+Redis supports multi-key commands in Cluster Mode, such as Set type
+unions or intersections, mset and mget, as long as the keys all hash to
+the same slot.
+By using the RedisCluster client, you can use the known
+functions (e.g. mget, mset) to perform an atomic multi-key operation.
+However, you must ensure all keys are mapped to the same slot, otherwise
+a RedisClusterException will be thrown. Redis Cluster implements a
+concept called hash tags that can be used in order to force certain keys
+to be stored in the same hash slot, see `Keys hash
+tag `__. You can
+also use the nonatomic variants of some of the multi-key operations, and
+pass keys that aren’t mapped to the same slot. The client will then map
+the keys to the relevant slots, sending the commands to the slots’ node
+owners. Non-atomic operations batch the keys according to their hash
+value, and then each batch is sent separately to the slot’s owner.
+
+.. code:: python
+
+   # Atomic operations can be used when all keys are mapped to the same slot
+   >>> rc.mset({'{foo}1': 'bar1', '{foo}2': 'bar2'})
+   >>> rc.mget('{foo}1', '{foo}2')
+   [b'bar1', b'bar2']
+   # Non-atomic multi-key operations split the keys into different slots
+   >>> rc.mset_nonatomic({'foo': 'value1', 'bar': 'value2', 'zzz': 'value3'})
+   >>> rc.mget_nonatomic('foo', 'bar', 'zzz')
+   [b'value1', b'value2', b'value3']
+
+**Cluster PubSub:**
+
+When a ClusterPubSub instance is created without specifying a node, a
+single node will be transparently chosen for the pubsub connection on
+the first command execution. The node will be determined by:
+
+1. Hashing the channel name in the request to find its keyslot
+2. Selecting a node that handles the keyslot: If read_from_replicas is
+   set to true or load_balancing_strategy is provided, a replica can be
+   selected.
+
+Known PubSub Limitations
+------------------------
+
+Pattern subscribe and publish do not currently work properly due to key
+slots. If we hash a pattern like fo\* we will receive a keyslot for that
+string, but there are endless possibilities for channel names based on
+this pattern - unknowable in advance. This feature is not disabled but
+the commands are not currently recommended for use. See
+`redis-py-cluster
+documentation `__
+for more.
+
+.. code:: python
+
+   >>> p1 = rc.pubsub()
+   # p1 connection will be set to the node that holds 'foo' keyslot
+   >>> p1.subscribe('foo')
+   # p2 connection will be set to node 'localhost:6379'
+   >>> p2 = rc.pubsub(rc.get_node('localhost', 6379))
+
+**Read Only Mode**
+
+By default, Redis Cluster always returns a MOVED redirection response on
+accessing a replica node. You can overcome this limitation and scale
+read commands by triggering READONLY mode.
+
+To enable READONLY mode, pass read_from_replicas=True or define a
+load_balancing_strategy in the RedisCluster constructor.
+When read_from_replicas is set to true, read commands will be assigned
+between the primary and its replicas in a Round-Robin manner.
+With load_balancing_strategy you can define a custom strategy for
+assigning read commands to the replicas and primary nodes.
+
+READONLY mode can be set at runtime by calling the readonly() method
+with target_nodes=‘replicas’, and read-write access can be restored by
+calling the readwrite() method.
+
+.. code:: python
+
+   >>> from redis.cluster import RedisCluster as Redis
+   # Use 'debug' log level to print the node that the command is executed on
+   >>> rc_readonly = Redis(startup_nodes=startup_nodes,
+   ...                     read_from_replicas=True)
+   >>> rc_readonly.set('{foo}1', 'bar1')
+   >>> for i in range(0, 4):
+   ...     # Assigns read command to the slot's hosts in a Round-Robin manner
+   ...     rc_readonly.get('{foo}1')
+   # set command would be directed only to the slot's primary node
+   >>> rc_readonly.set('{foo}2', 'bar2')
+   # reset READONLY flag
+   >>> rc_readonly.readwrite(target_nodes='replicas')
+   # now the get command would be directed only to the slot's primary node
+   >>> rc_readonly.get('{foo}1')
diff --git a/docs/commands.rst b/docs/commands.rst
new file mode 100644
index 0000000000..d35f290ace
--- /dev/null
+++ b/docs/commands.rst
@@ -0,0 +1,30 @@
+Redis Commands
+##############
+
+Core Commands
+*************
+
+The following functions can be used to replicate their equivalent `Redis command `_. Generally they can be used as functions on your redis connection. For the simplest example, see below:
+
+Getting and setting data in redis::
+
+   import redis
+   r = redis.Redis(decode_responses=True)
+   r.set('mykey', 'thevalueofmykey')
+   r.get('mykey')
+
+.. autoclass:: redis.commands.core.CoreCommands
+   :inherited-members:
+
+Sentinel Commands
+*****************
+.. autoclass:: redis.commands.sentinel.SentinelCommands
+   :inherited-members:
+
+Redis Cluster Commands
+**********************
+
+The following `Redis commands `_ are available within a `Redis Cluster `_. Generally they can be used as functions on your redis connection.
+
+.. autoclass:: redis.commands.cluster.RedisClusterCommands
+   :inherited-members:
diff --git a/docs/conf.py b/docs/conf.py
index 690be037db..865bd522a2 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-#
 # redis-py documentation build configuration file, created by
 # sphinx-quickstart on Fri Feb 8 00:47:08 2013.
 #
@@ -12,218 +10,257 @@
 # All configuration values have a default; values that are commented out
 # serve to show the default.
 
+import datetime
 import os
 import sys
 
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.insert(0, os.path.abspath('.'))
+# sys.path.insert(0, os.path.abspath('.'))
 sys.path.append(os.path.abspath(os.path.pardir))
 
 # -- General configuration ----------------------------------------------------
 
 # If your documentation needs a minimal Sphinx version, state it here.
-#needs_sphinx = '1.0'
+# needs_sphinx = '1.0'
 
 # Add any Sphinx extension module names here, as strings. They can be
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.viewcode']
+extensions = [
+    "nbsphinx",
+    "sphinx_gallery.load_style",
+    "sphinx.ext.autodoc",
+    "sphinx.ext.viewcode",
+    "sphinx.ext.autosectionlabel",
+    "sphinx.ext.napoleon",
+]
+
+# Napoleon settings. We only accept Google-style docstrings.
+napoleon_google_docstring = True
+napoleon_numpy_docstring = False
+
+# AutosectionLabel settings.
+# Uses a :