diff --git a/.ci/.matrix_exclude.yml b/.ci/.matrix_exclude.yml index eaf0fa82b..c7089061f 100644 --- a/.ci/.matrix_exclude.yml +++ b/.ci/.matrix_exclude.yml @@ -9,40 +9,85 @@ exclude: FRAMEWORK: django-4.0 - VERSION: python-3.7 FRAMEWORK: django-4.0 + # Django 4.2 requires Python 3.8+ + - VERSION: python-3.6 + FRAMEWORK: django-4.2 + - VERSION: python-3.7 + FRAMEWORK: django-4.2 + # Django 5.0 requires Python 3.10+ + - VERSION: python-3.6 + FRAMEWORK: django-5.0 + - VERSION: python-3.7 + FRAMEWORK: django-5.0 + - VERSION: python-3.8 + FRAMEWORK: django-5.0 + - VERSION: python-3.9 + FRAMEWORK: django-5.0 - VERSION: pypy-3 # current pypy-3 is compatible with Python 3.7 FRAMEWORK: celery-5-django-4 - VERSION: python-3.6 FRAMEWORK: celery-5-django-4 - VERSION: python-3.7 FRAMEWORK: celery-5-django-4 + - VERSION: python-3.6 + FRAMEWORK: celery-5-django-5 + - VERSION: python-3.7 + FRAMEWORK: celery-5-django-5 + - VERSION: python-3.8 + FRAMEWORK: celery-5-django-5 + - VERSION: python-3.9 + FRAMEWORK: celery-5-django-5 + # Elasticsearch + - VERSION: python-3.6 + FRAMEWORK: elasticsearch-9 + - VERSION: python-3.7 + FRAMEWORK: elasticsearch-9 # Flask - VERSION: pypy-3 FRAMEWORK: flask-0.11 # see https://github.com/pallets/flask/commit/6e46d0cd, 0.11.2 was never released + - VERSION: python-3.6 + FRAMEWORK: flask-2.1 + - VERSION: python-3.6 + FRAMEWORK: flask-2.2 + - VERSION: python-3.6 + FRAMEWORK: flask-2.3 + - VERSION: python-3.6 + FRAMEWORK: flask-3.0 + - VERSION: python-3.7 + FRAMEWORK: flask-2.3 + - VERSION: python-3.7 + FRAMEWORK: flask-3.0 + - VERSION: python-3.14 + FRAMEWORK: flask-1.0 # Python 3.10 removed a bunch of classes from collections, now in collections.abc - VERSION: python-3.10 FRAMEWORK: django-1.11 - VERSION: python-3.10 FRAMEWORK: django-2.0 - - VERSION: python-3.10 - FRAMEWORK: celery-4-django-2.0 - - VERSION: python-3.10 - FRAMEWORK: celery-4-django-1.11 - - VERSION: python-3.11 # cannot import name 'formatargspec' from 'inspect' - FRAMEWORK: 
celery-4-flask-1.0 - VERSION: python-3.11 # https://github.com/celery/billiard/issues/377 FRAMEWORK: celery-5-flask-2 - VERSION: python-3.11 # https://github.com/celery/billiard/issues/377 FRAMEWORK: celery-5-django-3 - VERSION: python-3.11 # https://github.com/celery/billiard/issues/377 FRAMEWORK: celery-5-django-4 - - VERSION: python-3.12 # cannot import name 'formatargspec' from 'inspect' - FRAMEWORK: celery-4-flask-1.0 - VERSION: python-3.12 # https://github.com/celery/billiard/issues/377 FRAMEWORK: celery-5-flask-2 - VERSION: python-3.12 # https://github.com/celery/billiard/issues/377 FRAMEWORK: celery-5-django-3 - VERSION: python-3.12 # https://github.com/celery/billiard/issues/377 FRAMEWORK: celery-5-django-4 + - VERSION: python-3.13 # https://github.com/celery/billiard/issues/377 + FRAMEWORK: celery-5-flask-2 + - VERSION: python-3.13 # https://github.com/celery/billiard/issues/377 + FRAMEWORK: celery-5-django-3 + - VERSION: python-3.13 # https://github.com/celery/billiard/issues/377 + FRAMEWORK: celery-5-django-4 + - VERSION: python-3.14 # https://github.com/celery/billiard/issues/377 + FRAMEWORK: celery-5-flask-2 + - VERSION: python-3.14 # https://github.com/celery/billiard/issues/377 + FRAMEWORK: celery-5-django-3 + - VERSION: python-3.14 # https://github.com/celery/billiard/issues/377 + FRAMEWORK: celery-5-django-4 - VERSION: python-3.10 FRAMEWORK: graphene-2 - VERSION: python-3.10 @@ -59,10 +104,6 @@ exclude: FRAMEWORK: django-2.0 - VERSION: python-3.11 FRAMEWORK: django-2.1 - - VERSION: python-3.11 - FRAMEWORK: celery-4-django-2.0 - - VERSION: python-3.11 - FRAMEWORK: celery-4-django-1.11 - VERSION: python-3.11 FRAMEWORK: graphene-2 - VERSION: python-3.11 @@ -79,10 +120,6 @@ exclude: FRAMEWORK: django-2.0 - VERSION: python-3.12 FRAMEWORK: django-2.1 - - VERSION: python-3.12 - FRAMEWORK: celery-4-django-2.0 - - VERSION: python-3.12 - FRAMEWORK: celery-4-django-1.11 - VERSION: python-3.12 FRAMEWORK: graphene-2 - VERSION: python-3.12 @@ -93,6 +130,60 @@ 
exclude: FRAMEWORK: cassandra-3.4 - VERSION: python-3.12 FRAMEWORK: pymongo-3.5 + - VERSION: python-3.13 + FRAMEWORK: django-1.11 + - VERSION: python-3.13 + FRAMEWORK: django-2.0 + - VERSION: python-3.13 + FRAMEWORK: django-2.1 + - VERSION: python-3.13 + FRAMEWORK: django-2.2 + - VERSION: python-3.13 + FRAMEWORK: django-3.0 + - VERSION: python-3.13 + FRAMEWORK: django-3.1 + - VERSION: python-3.13 + FRAMEWORK: django-3.2 + - VERSION: python-3.13 + FRAMEWORK: django-4.0 + - VERSION: python-3.13 + FRAMEWORK: graphene-2 + - VERSION: python-3.13 + FRAMEWORK: aiohttp-3.0 + - VERSION: python-3.13 + FRAMEWORK: aiohttp-4.0 + - VERSION: python-3.13 + FRAMEWORK: cassandra-3.4 + - VERSION: python-3.14 + FRAMEWORK: django-1.11 + - VERSION: python-3.14 + FRAMEWORK: django-2.0 + - VERSION: python-3.14 + FRAMEWORK: django-2.1 + - VERSION: python-3.14 + FRAMEWORK: django-2.2 + - VERSION: python-3.14 + FRAMEWORK: django-3.0 + - VERSION: python-3.14 + FRAMEWORK: django-3.1 + - VERSION: python-3.14 + FRAMEWORK: django-3.2 + - VERSION: python-3.14 + FRAMEWORK: django-4.0 + - VERSION: python-3.14 + FRAMEWORK: django-4.2 + - VERSION: python-3.14 + FRAMEWORK: django-5.0 + - VERSION: python-3.14 + FRAMEWORK: graphene-2 + - VERSION: python-3.14 + FRAMEWORK: aiohttp-3.0 + - VERSION: python-3.14 + FRAMEWORK: aiohttp-4.0 + - VERSION: python-3.14 + FRAMEWORK: cassandra-3.4 + - VERSION: python-3.14 + FRAMEWORK: pymongo-3.5 # pymongo - VERSION: python-3.10 FRAMEWORK: pymongo-3.1 @@ -100,18 +191,30 @@ exclude: FRAMEWORK: pymongo-3.1 - VERSION: python-3.12 FRAMEWORK: pymongo-3.1 + - VERSION: python-3.13 + FRAMEWORK: pymongo-3.1 + - VERSION: python-3.14 + FRAMEWORK: pymongo-3.1 - VERSION: python-3.10 FRAMEWORK: pymongo-3.2 - VERSION: python-3.11 FRAMEWORK: pymongo-3.2 - VERSION: python-3.12 FRAMEWORK: pymongo-3.2 + - VERSION: python-3.13 + FRAMEWORK: pymongo-3.2 + - VERSION: python-3.14 + FRAMEWORK: pymongo-3.2 - VERSION: python-3.10 FRAMEWORK: pymongo-3.3 - VERSION: python-3.11 FRAMEWORK: 
pymongo-3.3 - VERSION: python-3.12 FRAMEWORK: pymongo-3.3 + - VERSION: python-3.13 + FRAMEWORK: pymongo-3.3 + - VERSION: python-3.14 + FRAMEWORK: pymongo-3.3 - VERSION: python-3.8 FRAMEWORK: pymongo-3.4 - VERSION: python-3.9 @@ -122,6 +225,14 @@ exclude: FRAMEWORK: pymongo-3.4 - VERSION: python-3.12 FRAMEWORK: pymongo-3.4 + - VERSION: python-3.13 + FRAMEWORK: pymongo-3.4 + - VERSION: python-3.13 + FRAMEWORK: pymongo-3.5 + - VERSION: python-3.14 + FRAMEWORK: pymongo-3.4 + - VERSION: python-3.14 + FRAMEWORK: pymongo-3.5 - VERSION: pypy-3 FRAMEWORK: pymongo-3.0 # pymssql @@ -145,6 +256,14 @@ exclude: FRAMEWORK: boto3-1.5 - VERSION: python-3.12 FRAMEWORK: boto3-1.6 + - VERSION: python-3.13 + FRAMEWORK: boto3-1.5 + - VERSION: python-3.13 + FRAMEWORK: boto3-1.6 + - VERSION: python-3.14 + FRAMEWORK: boto3-1.5 + - VERSION: python-3.14 + FRAMEWORK: boto3-1.6 # aiohttp client, only supported in Python 3.7+ - VERSION: pypy-3 FRAMEWORK: aiohttp-3.0 @@ -184,8 +303,16 @@ exclude: # asyncpg - VERSION: pypy-3 FRAMEWORK: asyncpg-newest + - VERSION: pypy-3 + FRAMEWORK: asyncpg-0.28 - VERSION: python-3.6 FRAMEWORK: asyncpg-newest + - VERSION: python-3.6 + FRAMEWORK: asyncpg-0.28 + - VERSION: python-3.13 + FRAMEWORK: asyncpg-0.28 + - VERSION: python-3.14 + FRAMEWORK: asyncpg-0.28 # sanic - VERSION: pypy-3 FRAMEWORK: sanic-newest @@ -195,8 +322,14 @@ exclude: FRAMEWORK: sanic-20.12 - VERSION: python-3.6 FRAMEWORK: sanic-newest + - VERSION: python-3.8 + FRAMEWORK: sanic-newest + - VERSION: python-3.13 + FRAMEWORK: sanic-20.12 + - VERSION: python-3.14 + FRAMEWORK: sanic-20.12 + # aioredis - VERSION: pypy-3 - # aioredis FRAMEWORK: aioredis-newest - VERSION: python-3.6 FRAMEWORK: aioredis-newest @@ -230,11 +363,31 @@ exclude: FRAMEWORK: twisted-16 - VERSION: python-3.12 FRAMEWORK: twisted-15 + - VERSION: python-3.13 + FRAMEWORK: twisted-18 + - VERSION: python-3.13 + FRAMEWORK: twisted-17 + - VERSION: python-3.13 + FRAMEWORK: twisted-16 + - VERSION: python-3.13 + FRAMEWORK: twisted-15 + - 
VERSION: python-3.14 + FRAMEWORK: twisted-18 + - VERSION: python-3.14 + FRAMEWORK: twisted-17 + - VERSION: python-3.14 + FRAMEWORK: twisted-16 + - VERSION: python-3.14 + FRAMEWORK: twisted-15 # pylibmc - VERSION: python-3.11 FRAMEWORK: pylibmc-1.4 - VERSION: python-3.12 FRAMEWORK: pylibmc-1.4 + - VERSION: python-3.13 + FRAMEWORK: pylibmc-1.4 + - VERSION: python-3.14 + FRAMEWORK: pylibmc-1.4 # grpc - VERSION: python-3.6 FRAMEWORK: grpc-newest @@ -250,32 +403,33 @@ exclude: FRAMEWORK: grpc-1.24 - VERSION: python-3.12 FRAMEWORK: grpc-1.24 + - VERSION: python-3.13 + FRAMEWORK: grpc-1.24 + - VERSION: python-3.14 + FRAMEWORK: grpc-1.24 - VERSION: python-3.7 FRAMEWORK: flask-1.0 - VERSION: python-3.7 FRAMEWORK: flask-1.1 - VERSION: python-3.7 FRAMEWORK: jinja2-2 - - VERSION: python-3.7 - FRAMEWORK: celery-4-flask-1.0 # TODO py3.12 - - VERSION: python-3.12 - FRAMEWORK: pymssql-newest # no wheels available yet - - VERSION: python-3.12 - FRAMEWORK: aiohttp-newest # no wheels available yet - - VERSION: python-3.12 - FRAMEWORK: elasticsearch-7 # relies on aiohttp - - VERSION: python-3.12 - FRAMEWORK: elasticsearch-8 # relies on aiohttp - - VERSION: python-3.12 - FRAMEWORK: aiobotocore-newest # relies on aiohttp - VERSION: python-3.12 FRAMEWORK: sanic-20.12 # no wheels available yet - - VERSION: python-3.12 - FRAMEWORK: sanic-newest # no wheels available yet - - VERSION: python-3.12 - FRAMEWORK: kafka-python-newest # https://github.com/dpkp/kafka-python/pull/2376 - - VERSION: python-3.12 - FRAMEWORK: pyodbc-newest # error on wheel - - VERSION: python-3.12 + - VERSION: python-3.13 + FRAMEWORK: cassandra-newest # c extension issue + - VERSION: python-3.14 FRAMEWORK: cassandra-newest # c extension issue + # httpx + - VERSION: python-3.13 + FRAMEWORK: httpx-0.13 + - VERSION: python-3.13 + FRAMEWORK: httpx-0.14 + - VERSION: python-3.13 + FRAMEWORK: httpx-0.21 + - VERSION: python-3.14 + FRAMEWORK: httpx-0.13 + - VERSION: python-3.14 + FRAMEWORK: httpx-0.14 + - VERSION: python-3.14 + 
FRAMEWORK: httpx-0.21 diff --git a/.ci/.matrix_framework.yml b/.ci/.matrix_framework.yml index 6bf64ab44..64d1dc8ef 100644 --- a/.ci/.matrix_framework.yml +++ b/.ci/.matrix_framework.yml @@ -3,21 +3,20 @@ FRAMEWORK: - none - django-1.11 - - django-2.0 - - django-3.1 - django-3.2 - django-4.0 + - django-4.2 + - django-5.0 - flask-0.12 - - flask-1.1 - - flask-2.0 + - flask-2.3 + - flask-3.0 - jinja2-3 - opentelemetry-newest - opentracing-newest - twisted-newest - - celery-4-flask-1.0 - - celery-4-django-2.0 - celery-5-flask-2 - celery-5-django-4 + - celery-5-django-5 - requests-newest - boto3-newest - pymongo-newest @@ -30,9 +29,9 @@ FRAMEWORK: - pyodbc-newest - memcached-newest - pylibmc-newest - - elasticsearch-2 - elasticsearch-7 - elasticsearch-8 + - elasticsearch-9 - cassandra-newest - psutil-newest #- eventlet-newest @@ -44,6 +43,8 @@ FRAMEWORK: - aiopg-newest - asyncpg-newest - tornado-newest + # this has a dependency on requests, run it to catch update issues before merging. Drop after baseline > 0.21.0 + - starlette-0.14 - starlette-newest - pymemcache-newest - graphene-2 @@ -55,3 +56,5 @@ FRAMEWORK: - aiobotocore-newest - kafka-python-newest - grpc-newest + - azurefunctions-newest + - azure-newest diff --git a/.ci/.matrix_framework_fips.yml b/.ci/.matrix_framework_fips.yml new file mode 100644 index 000000000..6bbc9cd3e --- /dev/null +++ b/.ci/.matrix_framework_fips.yml @@ -0,0 +1,23 @@ +# this is a limited list of matrix builds to be used for PRs +# see .matrix_framework_full.yml for a full list +FRAMEWORK: + - none + - django-5.0 + - flask-3.0 + - jinja2-3 + - opentelemetry-newest + - opentracing-newest + - twisted-newest + - celery-5-flask-2 + - celery-5-django-5 + - requests-newest + - psutil-newest + - gevent-newest + - aiohttp-newest + - tornado-newest + - starlette-newest + - graphene-2 + - httpx-newest + - httplib2-newest + - prometheus_client-newest + - sanic-newest diff --git a/.ci/.matrix_framework_full.yml b/.ci/.matrix_framework_full.yml index 
17d7198fe..6b3a6ea08 100644 --- a/.ci/.matrix_framework_full.yml +++ b/.ci/.matrix_framework_full.yml @@ -10,6 +10,8 @@ FRAMEWORK: - django-3.1 - django-3.2 - django-4.0 + - django-4.2 + - django-5.0 # - django-master - flask-0.10 - flask-0.11 @@ -17,14 +19,16 @@ FRAMEWORK: - flask-1.0 - flask-1.1 - flask-2.0 + - flask-2.1 + - flask-2.2 + - flask-2.3 + - flask-3.0 - jinja2-2 - jinja2-3 - - celery-4-flask-1.0 - - celery-4-django-1.11 - - celery-4-django-2.0 - celery-5-flask-2 - celery-5-django-3 - celery-5-django-4 + - celery-5-django-5 - opentelemetry-newest - opentracing-newest - opentracing-2.0 @@ -42,6 +46,7 @@ FRAMEWORK: - pymongo-3.3 - pymongo-3.4 - pymongo-3.5 + - pymongo-3.6 - pymongo-newest - redis-3 - redis-2 @@ -58,15 +63,14 @@ FRAMEWORK: - psutil-5.0 - psutil-4.0 #- eventlet-newest - - elasticsearch-2 - - elasticsearch-5 - - elasticsearch-6 - elasticsearch-7 - elasticsearch-8 + - elasticsearch-9 - gevent-newest - aiohttp-3.0 - aiohttp-newest - aiopg-newest + - asyncpg-0.28 - asyncpg-newest - tornado-newest - starlette-0.13 @@ -86,3 +90,5 @@ FRAMEWORK: - kafka-python-newest - grpc-newest #- grpc-1.24 # This appears to have problems with python>3.6? 
+ - azurefunctions-newest + - azure-newest diff --git a/.ci/.matrix_python.yml b/.ci/.matrix_python.yml index dbb9c7bf6..86c87ad88 100644 --- a/.ci/.matrix_python.yml +++ b/.ci/.matrix_python.yml @@ -1,3 +1,3 @@ VERSION: - python-3.6 - - python-3.12 + - python-3.13 diff --git a/.ci/.matrix_python_fips.yml b/.ci/.matrix_python_fips.yml new file mode 100644 index 000000000..01cf811ac --- /dev/null +++ b/.ci/.matrix_python_fips.yml @@ -0,0 +1,2 @@ +VERSION: + - python-3.12 diff --git a/.ci/.matrix_python_full.yml b/.ci/.matrix_python_full.yml index 03fead7ab..56b272fbc 100644 --- a/.ci/.matrix_python_full.yml +++ b/.ci/.matrix_python_full.yml @@ -6,4 +6,6 @@ VERSION: - python-3.10 - python-3.11 - python-3.12 + - python-3.13 + - python-3.14 # - pypy-3 # excluded due to build issues with SQLite/Django diff --git a/.ci/.matrix_windows.yml b/.ci/.matrix_windows.yml deleted file mode 100644 index 0f12b9422..000000000 --- a/.ci/.matrix_windows.yml +++ /dev/null @@ -1,21 +0,0 @@ -# This is the limited list of matrix builds in Windows, to be triggered on a PR basis -# The format is: -# VERSION: Major.Minor python version. -# FRAMEWORK: What framework to be tested. String format. -# ASYNCIO: Whether it's enabled or disabled. Boolean format. 
-# -# TODO: Remove this file when fully migrated to GH Actions - -windows: -# - VERSION: "3.6" -# FRAMEWORK: "none" -# ASYNCIO: "true" -# - VERSION: "3.7" -# FRAMEWORK: "none" -# ASYNCIO: "true" - - VERSION: "3.8" - FRAMEWORK: "none" - ASYNCIO: "true" - - VERSION: "3.9" # waiting for greenlet to have binary wheels for 3.9 - FRAMEWORK: "none" - ASYNCIO: "true" diff --git a/.ci/create-arn-table.sh b/.ci/create-arn-table.sh index 3105822ea..470d03c50 100755 --- a/.ci/create-arn-table.sh +++ b/.ci/create-arn-table.sh @@ -7,10 +7,13 @@ set -o pipefail # AWS_FOLDER - that's the location of the publish-layer-version output for each region AWS_FOLDER=${AWS_FOLDER?:No aws folder provided} -ARN_FILE=".arn-file.md" +# Get the repository root directory (where .git is located) +REPO_ROOT="$(realpath $(dirname "${BASH_SOURCE[0]}")/..)" +ARN_FILE="${REPO_ROOT}/.arn-file.md" { - echo "### Elastic APM Python agent layer ARNs" + echo "
" + echo "Elastic APM Python agent layer ARNs" echo '' echo '|Region|ARN|' echo '|------|---|' @@ -22,4 +25,10 @@ for f in $(ls "${AWS_FOLDER}"); do echo "|${f}|${LAYER_VERSION_ARN}|" >> "${ARN_FILE}" done -echo '' >> "${ARN_FILE}" +{ + echo '' + echo '
' + echo '' +} >> "${ARN_FILE}" + +echo "INFO: Created ARN table at ${ARN_FILE}" diff --git a/.ci/docker/README.md b/.ci/docker/README.md index 1241a7f05..9ca435c1c 100644 --- a/.ci/docker/README.md +++ b/.ci/docker/README.md @@ -2,7 +2,7 @@ Utility script for building and pushing the images based on `.ci/.matrix_python_full.yml`. -> :information_source: This script is mainly used in [publish-docker-images](https://github.com/elastic/apm-pipeline-library/actions/workflows/publish-docker-images.yml) workflow, +> :information_source: This script is mainly used in [publish-docker-images](https://github.com/elastic/apm-agent-python/actions/workflows/build-images.yml) workflow, which can be triggered safely at any time. ## Options diff --git a/.ci/docker/util.sh b/.ci/docker/util.sh index 9326a5773..458f77f29 100755 --- a/.ci/docker/util.sh +++ b/.ci/docker/util.sh @@ -43,8 +43,13 @@ for version in $versions; do case $ACTION in build) + cache_image="${full_image_name}" + # check that we have an image before using it as a cache + docker manifest inspect "${full_image_name}" || cache_image= + DOCKER_BUILDKIT=1 docker build \ - --cache-from="${full_image_name}" \ + --progress=plain \ + --cache-from="${cache_image}" \ -f "${project_root}/tests/Dockerfile" \ --build-arg PYTHON_IMAGE="${version/-/:}" \ -t "${full_image_name}" \ diff --git a/.ci/publish-aws.sh b/.ci/publish-aws.sh index aac092bad..3bb7a554c 100755 --- a/.ci/publish-aws.sh +++ b/.ci/publish-aws.sh @@ -46,7 +46,7 @@ for region in $ALL_AWS_REGIONS; do --layer-name="${FULL_LAYER_NAME}" \ --description="AWS Lambda Extension Layer for the Elastic APM Python Agent" \ --license-info="BSD-3-Clause" \ - --compatible-runtimes python3.6 python3.7 python3.8 python3.9 python3.10 python3.11\ + --compatible-runtimes python3.6 python3.7 python3.8 python3.9 python3.10 python3.11 python3.12 python3.13\ --zip-file="fileb://${zip_file}") echo "${publish_output}" > "${AWS_FOLDER}/${region}" layer_version=$(echo "${publish_output}" 
| jq '.Version') diff --git a/.ci/snapshoty.yml b/.ci/snapshoty.yml deleted file mode 100644 index ccebc3426..000000000 --- a/.ci/snapshoty.yml +++ /dev/null @@ -1,36 +0,0 @@ ---- - -# Version of configuration to use -version: '1.0' - -# You can define a Google Cloud Account to use -account: - # Project id of the service account - project: '${GCS_PROJECT}' - # Private key id of the service account - private_key_id: '${GCS_PRIVATE_KEY_ID}' - # Private key of the service account - private_key: '${GCS_PRIVATE_KEY}' - # Email of the service account - client_email: '${GCS_CLIENT_EMAIL}' - # URI token - token_uri: 'https://oauth2.googleapis.com/token' - -# List of artifacts -artifacts: - # Path to use for artifacts discovery - - path: './dist' - # Files pattern to match - files_pattern: 'elastic_apm-(?P\d+\.\d+\.\d+)-(.*)\.whl' - # File layout on GCS bucket - output_pattern: '{project}/{github_branch_name}/elastic-apm-python-{app_version}-{github_sha_short}.whl' - # List of metadata processors to use. 
- metadata: - # Define static custom metadata - - name: 'custom' - data: - project: 'apm-agent-python' - # Add git metadata - - name: 'git' - # Add github_actions metadata - - name: 'github_actions' diff --git a/.ci/updatecli.d/update-gherkin-specs.yml b/.ci/updatecli.d/update-gherkin-specs.yml deleted file mode 100644 index 8deb269fc..000000000 --- a/.ci/updatecli.d/update-gherkin-specs.yml +++ /dev/null @@ -1,117 +0,0 @@ -name: update-gherkin-specs -pipelineid: update-gherkin-specs -title: synchronize gherkin specs - -scms: - default: - kind: github - spec: - user: '{{ requiredEnv "GIT_USER" }}' - email: '{{ requiredEnv "GIT_EMAIL" }}' - owner: elastic - repository: apm-agent-python - token: '{{ requiredEnv "GITHUB_TOKEN" }}' - username: '{{ requiredEnv "GIT_USER" }}' - branch: main - -sources: - sha: - kind: file - spec: - file: 'https://github.com/elastic/apm/commit/main.patch' - matchpattern: "^From\\s([0-9a-f]{40})\\s" - transformers: - - findsubmatch: - pattern: "[0-9a-f]{40}" - - api_key.feature: - kind: file - spec: - file: https://raw.githubusercontent.com/elastic/apm/main/tests/agents/gherkin-specs/api_key.feature - azure_app_service_metadata.feature: - kind: file - spec: - file: https://raw.githubusercontent.com/elastic/apm/main/tests/agents/gherkin-specs/azure_app_service_metadata.feature - azure_functions_metadata.feature: - kind: file - spec: - file: https://raw.githubusercontent.com/elastic/apm/main/tests/agents/gherkin-specs/azure_functions_metadata.feature - otel_bridge.feature: - kind: file - spec: - file: https://raw.githubusercontent.com/elastic/apm/main/tests/agents/gherkin-specs/otel_bridge.feature - outcome.feature: - kind: file - spec: - file: https://raw.githubusercontent.com/elastic/apm/main/tests/agents/gherkin-specs/outcome.feature - user_agent.feature: - kind: file - spec: - file: https://raw.githubusercontent.com/elastic/apm/main/tests/agents/gherkin-specs/user_agent.feature - -actions: - pr: - kind: "github/pullrequest" - scmid: 
default - title: '[Automation] Update Gherkin specs' - spec: - automerge: false - draft: false - labels: - - "automation" - description: |- - ### What - APM agent Gherkin specs automatic sync - ### Why - *Changeset* - * https://github.com/elastic/apm/commit/{{ source "sha" }} - -targets: - api_key.feature: - name: api_key.feature - scmid: default - sourceid: api_key.feature - kind: file - spec: - file: tests/bdd/features/api_key.feature - forcecreate: true - azure_app_service_metadata.feature: - name: azure_app_service_metadata.feature - scmid: default - sourceid: azure_app_service_metadata.feature - kind: file - spec: - file: tests/bdd/features/azure_app_service_metadata.feature - forcecreate: true - azure_functions_metadata.feature: - name: azure_functions_metadata.feature - scmid: default - sourceid: azure_functions_metadata.feature - kind: file - spec: - file: tests/bdd/features/azure_functions_metadata.feature - forcecreate: true - otel_bridge.feature: - name: otel_bridge.feature - scmid: default - sourceid: otel_bridge.feature - kind: file - spec: - file: tests/bdd/features/otel_bridge.feature - forcecreate: true - outcome.feature: - name: outcome.feature - scmid: default - sourceid: outcome.feature - kind: file - spec: - file: tests/bdd/features/outcome.feature - forcecreate: true - user_agent.feature: - name: user_agent.feature - scmid: default - sourceid: user_agent.feature - kind: file - spec: - file: tests/bdd/features/user_agent.feature - forcecreate: true diff --git a/.ci/updatecli.d/update-json-specs.yml b/.ci/updatecli.d/update-json-specs.yml deleted file mode 100644 index 13d25c834..000000000 --- a/.ci/updatecli.d/update-json-specs.yml +++ /dev/null @@ -1,122 +0,0 @@ -name: update-json-specs -pipelineid: update-json-specs -title: synchronize json specs - -scms: - default: - kind: github - spec: - user: '{{ requiredEnv "GIT_USER" }}' - email: '{{ requiredEnv "GIT_EMAIL" }}' - owner: elastic - repository: apm-agent-python - token: '{{ requiredEnv 
"GITHUB_TOKEN" }}' - username: '{{ requiredEnv "GIT_USER" }}' - branch: main - -sources: - sha: - kind: file - spec: - file: 'https://github.com/elastic/apm/commit/main.patch' - matchpattern: "^From\\s([0-9a-f]{40})\\s" - transformers: - - findsubmatch: - pattern: "[0-9a-f]{40}" - - container_metadata_discovery.json: - kind: file - spec: - file: https://raw.githubusercontent.com/elastic/apm/main/tests/agents/json-specs/container_metadata_discovery.json - service_resource_inference.json: - kind: file - spec: - file: https://raw.githubusercontent.com/elastic/apm/main/tests/agents/json-specs/service_resource_inference.json - span_types.json: - kind: file - spec: - file: https://raw.githubusercontent.com/elastic/apm/main/tests/agents/json-specs/span_types.json - sql_signature_examples.json: - kind: file - spec: - file: https://raw.githubusercontent.com/elastic/apm/main/tests/agents/json-specs/sql_signature_examples.json - sql_token_examples.json: - kind: file - spec: - file: https://raw.githubusercontent.com/elastic/apm/main/tests/agents/json-specs/sql_token_examples.json - w3c_distributed_tracing.json: - kind: file - spec: - file: https://raw.githubusercontent.com/elastic/apm/main/tests/agents/json-specs/w3c_distributed_tracing.json - wildcard_matcher_tests.json: - kind: file - spec: - file: https://raw.githubusercontent.com/elastic/apm/main/tests/agents/json-specs/wildcard_matcher_tests.json - -actions: - pr: - kind: "github/pullrequest" - scmid: default - title: '[Automation] Update JSON specs' - spec: - automerge: false - draft: false - labels: - - "automation" - description: |- - ### What - APM agent specs automatic sync - ### Why - *Changeset* - * https://github.com/elastic/apm/commit/{{ source "sha" }} - -targets: - container_metadata_discovery.json: - name: container_metadata_discovery.json - scmid: default - sourceid: container_metadata_discovery.json - kind: file - spec: - file: tests/upstream/json-specs/container_metadata_discovery.json - 
service_resource_inference.json: - name: service_resource_inference.json - scmid: default - sourceid: service_resource_inference.json - kind: file - spec: - file: tests/upstream/json-specs/service_resource_inference.json - span_types.json: - name: span_types.json - scmid: default - sourceid: span_types.json - kind: file - spec: - file: tests/upstream/json-specs/span_types.json - sql_signature_examples.json: - name: sql_signature_examples.json - scmid: default - sourceid: sql_signature_examples.json - kind: file - spec: - file: tests/upstream/json-specs/sql_signature_examples.json - sql_token_examples.json: - name: sql_token_examples.json - scmid: default - sourceid: sql_token_examples.json - kind: file - spec: - file: tests/upstream/json-specs/sql_token_examples.json - w3c_distributed_tracing.json: - name: w3c_distributed_tracing.json - scmid: default - sourceid: w3c_distributed_tracing.json - kind: file - spec: - file: tests/upstream/json-specs/w3c_distributed_tracing.json - wildcard_matcher_tests.json: - name: wildcard_matcher_tests.json - scmid: default - sourceid: wildcard_matcher_tests.json - kind: file - spec: - file: tests/upstream/json-specs/wildcard_matcher_tests.json diff --git a/.ci/updatecli.d/update-specs.yml b/.ci/updatecli.d/update-specs.yml deleted file mode 100644 index ab3bd34c7..000000000 --- a/.ci/updatecli.d/update-specs.yml +++ /dev/null @@ -1,104 +0,0 @@ -name: update-specs -pipelineid: update-schema-specs -title: synchronize schema specs - -scms: - default: - kind: github - spec: - user: '{{ requiredEnv "GIT_USER" }}' - email: '{{ requiredEnv "GIT_EMAIL" }}' - owner: elastic - repository: apm-agent-python - token: '{{ requiredEnv "GITHUB_TOKEN" }}' - username: '{{ requiredEnv "GIT_USER" }}' - branch: main - -sources: - sha: - kind: file - spec: - file: 'https://github.com/elastic/apm-data/commit/main.patch' - matchpattern: "^From\\s([0-9a-f]{40})\\s" - transformers: - - findsubmatch: - pattern: "[0-9a-f]{40}" - error.json: - kind: file - 
spec: - file: https://raw.githubusercontent.com/elastic/apm-data/main/input/elasticapm/docs/spec/v2/error.json - metadata.json: - kind: file - spec: - file: https://raw.githubusercontent.com/elastic/apm-data/main/input/elasticapm/docs/spec/v2/metadata.json - metricset.json: - kind: file - spec: - file: https://raw.githubusercontent.com/elastic/apm-data/main/input/elasticapm/docs/spec/v2/metricset.json - span.json: - kind: file - spec: - file: https://raw.githubusercontent.com/elastic/apm-data/main/input/elasticapm/docs/spec/v2/span.json - transaction.json: - kind: file - spec: - file: https://raw.githubusercontent.com/elastic/apm-data/main/input/elasticapm/docs/spec/v2/transaction.json - -actions: - pr: - kind: "github/pullrequest" - scmid: default - title: '[Automation] Update JSON schema specs' - spec: - automerge: false - draft: false - labels: - - "automation" - description: |- - ### What - APM agent json schema automatic sync - ### Why - *Changeset* - * https://github.com/elastic/apm-data/commit/{{ source "sha" }} - -targets: - error.json: - name: error.json - scmid: default - sourceid: error.json - kind: file - spec: - file: tests/upstream/json-specs/error.json - forcecreate: true - metadata.json: - name: metadata.json - scmid: default - sourceid: metadata.json - kind: file - spec: - file: tests/upstream/json-specs/metadata.json - forcecreate: true - metricset.json: - name: metricset.json - scmid: default - sourceid: metricset.json - kind: file - spec: - file: tests/upstream/json-specs/metricset.json - forcecreate: true - span.json: - name: span.json - scmid: default - sourceid: span.json - kind: file - spec: - file: tests/upstream/json-specs/span.json - forcecreate: true - transaction.json: - name: transaction.json - scmid: default - sourceid: transaction.json - kind: file - spec: - file: tests/upstream/json-specs/transaction.json - forcecreate: true diff --git a/.ci/updatecli/values.d/apm-data-spec.yml b/.ci/updatecli/values.d/apm-data-spec.yml new file 
mode 100644 index 000000000..4bf89f633 --- /dev/null +++ b/.ci/updatecli/values.d/apm-data-spec.yml @@ -0,0 +1 @@ +apm_schema_specs_path: tests/upstream/json-specs diff --git a/.ci/updatecli/values.d/apm-gherkin.yml b/.ci/updatecli/values.d/apm-gherkin.yml new file mode 100644 index 000000000..7234fe8c8 --- /dev/null +++ b/.ci/updatecli/values.d/apm-gherkin.yml @@ -0,0 +1 @@ +apm_gherkin_specs_path: tests/bdd/features \ No newline at end of file diff --git a/.ci/updatecli/values.d/apm-json-specs.yml b/.ci/updatecli/values.d/apm-json-specs.yml new file mode 100644 index 000000000..c527210e4 --- /dev/null +++ b/.ci/updatecli/values.d/apm-json-specs.yml @@ -0,0 +1 @@ +apm_json_specs_path: tests/upstream/json-specs diff --git a/.ci/updatecli/values.d/scm.yml b/.ci/updatecli/values.d/scm.yml new file mode 100644 index 000000000..ac8be9843 --- /dev/null +++ b/.ci/updatecli/values.d/scm.yml @@ -0,0 +1,10 @@ +scm: + enabled: true + owner: elastic + repository: apm-agent-python + branch: main + commitusingapi: true + # begin update-compose policy values + user: obltmachine + email: obltmachine@users.noreply.github.com + # end update-compose policy values \ No newline at end of file diff --git a/.ci/updatecli/values.d/update-compose.yml b/.ci/updatecli/values.d/update-compose.yml new file mode 100644 index 000000000..02df609f2 --- /dev/null +++ b/.ci/updatecli/values.d/update-compose.yml @@ -0,0 +1,3 @@ +spec: + files: + - "updatecli-compose.yaml" \ No newline at end of file diff --git a/.flake8 b/.flake8 index f629a9d29..7be07fbeb 100644 --- a/.flake8 +++ b/.flake8 @@ -1,6 +1,5 @@ [flake8] exclude= - elasticapm/utils/wrapt/**, build/**, src/**, tests/**, diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 000000000..245dd6855 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,3 @@ +* @elastic/apm-agent-python +/.github/actions/ @elastic/apm-agent-python @elastic/observablt-ci +/.github/workflows/ @elastic/apm-agent-python @elastic/observablt-ci 
diff --git a/.github/actions/build-distribution/action.yml b/.github/actions/build-distribution/action.yml new file mode 100644 index 000000000..6835dbec2 --- /dev/null +++ b/.github/actions/build-distribution/action.yml @@ -0,0 +1,21 @@ +--- + +name: common build distribution tasks +description: Run the build distribution + +runs: + using: "composite" + steps: + - uses: actions/setup-python@v6 + with: + python-version: "3.10" + + - name: Build lambda layer zip + run: ./dev-utils/make-distribution.sh + shell: bash + + - uses: actions/upload-artifact@v6 + with: + name: build-distribution + path: ./build/ + if-no-files-found: error diff --git a/.github/actions/packages/action.yml b/.github/actions/packages/action.yml new file mode 100644 index 000000000..45d71ecd5 --- /dev/null +++ b/.github/actions/packages/action.yml @@ -0,0 +1,27 @@ +--- + +name: common package tasks +description: Run the packages + +runs: + using: "composite" + steps: + - uses: actions/setup-python@v6 + with: + python-version: "3.10" + - name: Override the version if there is no tag release. 
+ run: | + if [[ "${GITHUB_REF}" != refs/tags/* ]]; then + echo "ELASTIC_CI_POST_VERSION=${{ github.run_id }}" >> "${GITHUB_ENV}" + fi + shell: bash + - name: Build packages + run: ./dev-utils/make-packages.sh + shell: bash + - name: Upload Packages + uses: actions/upload-artifact@v6 + with: + name: packages + path: | + dist/*.whl + dist/*tar.gz diff --git a/.github/community-label.yml b/.github/community-label.yml deleted file mode 100644 index 8872df2d5..000000000 --- a/.github/community-label.yml +++ /dev/null @@ -1,5 +0,0 @@ -# add 'community' label to all new issues and PRs created by the community -community: - - '.*' -triage: - - '.*' \ No newline at end of file diff --git a/.github/dependabot.yml b/.github/dependabot.yml index eb8155b22..9abbe4339 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -1,5 +1,12 @@ --- version: 2 +registries: + docker-elastic: + type: docker-registry + url: https://docker.elastic.co + username: ${{secrets.ELASTIC_DOCKER_USERNAME}} + password: ${{secrets.ELASTIC_DOCKER_PASSWORD}} + updates: # Enable version updates for python - package-ecosystem: "pip" @@ -8,8 +15,29 @@ updates: # Check for updates once a week schedule: interval: "weekly" - reviewers: - - "elastic/apm-agent-python" + day: "sunday" + time: "22:00" ignore: - dependency-name: "urllib3" # ignore until lambda runtimes use OpenSSL 1.1.1+ versions: [">=2.0.0"] + + # GitHub actions + - package-ecosystem: "github-actions" + directories: + - '/' + - '/.github/actions/*' + schedule: + interval: "weekly" + day: "sunday" + time: "22:00" + groups: + github-actions: + patterns: + - "*" + + - package-ecosystem: "docker" + directories: + - '/' + registries: "*" + schedule: + interval: "daily" diff --git a/.github/labeler-config.yml b/.github/labeler-config.yml deleted file mode 100644 index a1e4dbc29..000000000 --- a/.github/labeler-config.yml +++ /dev/null @@ -1,3 +0,0 @@ -# add 'agent-python' label to all new issues -agent-python: - - '.*' diff --git 
a/.github/workflows/README.md b/.github/workflows/README.md index 3cdfe70f0..5e2641541 100644 --- a/.github/workflows/README.md +++ b/.github/workflows/README.md @@ -39,6 +39,7 @@ Once a PR has been opened then there are two different ways you can trigger buil 1. Commit based 1. UI based, any Elasticians can force a build through the GitHub UI +1. PR review comment-based, any Elastic employees can force a full matrix test run through a PR review comment with the following syntax: `/test matrix`. #### Branches @@ -51,4 +52,4 @@ The tag release follows the naming convention: `v...`, wher ### OpenTelemetry -There is a GitHub workflow in charge to populate what the workflow run in terms of jobs and steps. Those details can be seen in [here](https://ela.st/oblt-ci-cd-stats) (**NOTE**: only available for Elasticians). +Every workflow and its logs are exported to OpenTelemetry traces/logs/metrics. Those details can be seen [here](https://ela.st/oblt-ci-cd-stats) (**NOTE**: only available for Elasticians). 
diff --git a/.github/workflows/addToProject.yml b/.github/workflows/addToProject.yml deleted file mode 100644 index 0a3b76924..000000000 --- a/.github/workflows/addToProject.yml +++ /dev/null @@ -1,20 +0,0 @@ -name: Auto Assign to Project(s) - -on: - issues: - types: [opened, edited, milestoned] -env: - MY_GITHUB_TOKEN: ${{ secrets.APM_TECH_USER_TOKEN }} - -jobs: - assign_one_project: - runs-on: ubuntu-latest - name: Assign milestoned to Project - steps: - - name: Assign issues with milestones to project - uses: elastic/assign-one-project-github-action@1.2.2 - if: github.event.issue && github.event.issue.milestone - with: - project: 'https://github.com/orgs/elastic/projects/454' - project_id: '5882982' - column_name: 'Planned' diff --git a/.github/workflows/build-distribution.yml b/.github/workflows/build-distribution.yml deleted file mode 100644 index 986632acd..000000000 --- a/.github/workflows/build-distribution.yml +++ /dev/null @@ -1,20 +0,0 @@ -name: build-distribution - -on: - workflow_call: ~ - -jobs: - build: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - uses: actions/setup-python@v4 - with: - python-version: "3.10" - - name: Build lambda layer zip - run: ./dev-utils/make-distribution.sh - - uses: actions/upload-artifact@v3 - with: - name: build-distribution - path: ./build/ - if-no-files-found: error diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml new file mode 100644 index 000000000..acef1e165 --- /dev/null +++ b/.github/workflows/build-images.yml @@ -0,0 +1,35 @@ +--- +name: build-images + +on: + workflow_dispatch: ~ + +permissions: + contents: read + +jobs: + + build-push: + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + env: + REGISTRY: ghcr.io + IMAGE_NAME: ${{ github.repository }}/apm-agent-python-testing + steps: + + - uses: actions/checkout@v6 + + - name: Login to ghcr.io + uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0 + with: + 
registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - run: ./util.sh --action build --registry ${{ env.REGISTRY }} --name ${{ env.IMAGE_NAME }} + working-directory: .ci/docker + + - run: ./util.sh --action push --registry ${{ env.REGISTRY }} --name ${{ env.IMAGE_NAME }} + working-directory: .ci/docker diff --git a/.github/workflows/docs-build.yml b/.github/workflows/docs-build.yml new file mode 100644 index 000000000..adf95da5d --- /dev/null +++ b/.github/workflows/docs-build.yml @@ -0,0 +1,19 @@ +name: docs-build + +on: + push: + branches: + - main + pull_request_target: ~ + merge_group: ~ + +jobs: + docs-preview: + uses: elastic/docs-builder/.github/workflows/preview-build.yml@main + with: + path-pattern: docs/** + permissions: + deployments: write + id-token: write + contents: read + pull-requests: write diff --git a/.github/workflows/docs-cleanup.yml b/.github/workflows/docs-cleanup.yml new file mode 100644 index 000000000..f83e017b5 --- /dev/null +++ b/.github/workflows/docs-cleanup.yml @@ -0,0 +1,14 @@ +name: docs-cleanup + +on: + pull_request_target: + types: + - closed + +jobs: + docs-preview: + uses: elastic/docs-builder/.github/workflows/preview-cleanup.yml@main + permissions: + contents: none + id-token: write + deployments: write diff --git a/.github/workflows/github-commands-comment.yml b/.github/workflows/github-commands-comment.yml new file mode 100644 index 000000000..8b5f48d34 --- /dev/null +++ b/.github/workflows/github-commands-comment.yml @@ -0,0 +1,18 @@ +--- +name: github-commands-comment + +on: + pull_request_target: + types: + - opened + +permissions: + contents: read + +jobs: + comment: + runs-on: ubuntu-latest + permissions: + pull-requests: write + steps: + - uses: elastic/oblt-actions/elastic/github-commands@v1 diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml index df219658c..f1a14d24f 100644 --- a/.github/workflows/labeler.yml +++ 
b/.github/workflows/labeler.yml @@ -4,43 +4,35 @@ on: types: [opened] pull_request_target: types: [opened] -env: - MY_GITHUB_TOKEN: ${{ secrets.APM_TECH_USER_TOKEN }} + +# '*: write' permissions for https://docs.github.com/en/rest/issues/labels?apiVersion=2022-11-28#add-labels-to-an-issue +permissions: + contents: read + issues: write + pull-requests: write + jobs: triage: runs-on: ubuntu-latest + env: + NUMBER: ${{ github.event.issue.number || github.event.pull_request.number }} + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - - name: Add agent-python label - uses: AlexanderWert/issue-labeler@v2.3 - with: - repo-token: "${{ secrets.GITHUB_TOKEN }}" - configuration-path: .github/labeler-config.yml - enable-versioned-regex: 0 - - name: Check team membership for user - uses: elastic/get-user-teams-membership@v1.0.4 - id: checkUserMember + - name: Get token + id: get_token + uses: actions/create-github-app-token@v2 with: - username: ${{ github.actor }} - team: 'apm' - usernamesToExclude: | - apmmachine - dependabot - GITHUB_TOKEN: ${{ secrets.APM_TECH_USER_TOKEN }} - - name: Show team membership - run: | - echo "::debug::isTeamMember: ${{ steps.checkUserMember.outputs.isTeamMember }}" - echo "::debug::isExcluded: ${{ steps.checkUserMember.outputs.isExcluded }}" - - name: Add community and triage lables - if: steps.checkUserMember.outputs.isTeamMember != 'true' && steps.checkUserMember.outputs.isExcluded != 'true' - uses: AlexanderWert/issue-labeler@v2.3 - with: - repo-token: "${{ secrets.GITHUB_TOKEN }}" - configuration-path: .github/community-label.yml - enable-versioned-regex: 0 - - name: Assign new internal pull requests to project - uses: elastic/assign-one-project-github-action@1.2.2 - if: (steps.checkUserMember.outputs.isTeamMember == 'true' || steps.checkUserMember.outputs.isExcluded == 'true') && github.event.pull_request + app-id: ${{ secrets.OBS_AUTOMATION_APP_ID }} + private-key: ${{ secrets.OBS_AUTOMATION_APP_PEM }} + permission-members: read + - name: Add 
agent-python label + run: gh issue edit "$NUMBER" --add-label "agent-python" --repo "${{ github.repository }}" + - id: is_elastic_member + uses: elastic/oblt-actions/github/is-member-of@v1 with: - project: 'https://github.com/orgs/elastic/projects/454' - project_id: '5882982' - column_name: 'In Progress' + github-org: "elastic" + github-user: ${{ github.actor }} + github-token: ${{ steps.get_token.outputs.token }} + - name: Add community and triage labels + if: contains(steps.is_elastic_member.outputs.result, 'false') && github.actor != 'dependabot[bot]' && github.actor != 'elastic-observability-automation[bot]' + run: gh issue edit "$NUMBER" --add-label "community,triage" --repo "${{ github.repository }}" diff --git a/.github/workflows/matrix-command.yml b/.github/workflows/matrix-command.yml new file mode 100644 index 000000000..9692b8cea --- /dev/null +++ b/.github/workflows/matrix-command.yml @@ -0,0 +1,49 @@ +name: matrix-command + +on: + pull_request_review: + types: + - submitted + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} + +permissions: + contents: read + +jobs: + command-validation: + if: startsWith(github.event.review.body, '/test matrix') + runs-on: ubuntu-latest + timeout-minutes: 5 + permissions: + pull-requests: write + steps: + - name: Is comment allowed? 
+ uses: actions/github-script@v8 + with: + script: | + const actorPermission = (await github.rest.repos.getCollaboratorPermissionLevel({ + ...context.repo, + username: context.actor + })).data.permission + const isPermitted = ['write', 'admin'].includes(actorPermission) + if (!isPermitted) { + const errorMessage = 'Only users with write permission to the repository can run GitHub commands' + await github.rest.issues.createComment({ + ...context.repo, + issue_number: context.issue.number, + body: errorMessage, + }) + core.setFailed(errorMessage) + return + } + + test: + needs: + - command-validation + uses: ./.github/workflows/test.yml + with: + full-matrix: true + ref: ${{ github.event.pull_request.head.sha }} diff --git a/.github/workflows/microbenchmark.yml b/.github/workflows/microbenchmark.yml index 9af88a6b1..e3f0a41d6 100644 --- a/.github/workflows/microbenchmark.yml +++ b/.github/workflows/microbenchmark.yml @@ -16,32 +16,16 @@ permissions: jobs: microbenchmark: runs-on: ubuntu-latest - # wait up to 1 hour - timeout-minutes: 60 + timeout-minutes: 5 steps: - - id: buildkite - name: Run buildkite pipeline - uses: elastic/apm-pipeline-library/.github/actions/buildkite@current + - name: Run microbenchmark + uses: elastic/oblt-actions/buildkite/run@v1 with: - vaultUrl: ${{ secrets.VAULT_ADDR }} - vaultRoleId: ${{ secrets.VAULT_ROLE_ID }} - vaultSecretId: ${{ secrets.VAULT_SECRET_ID }} - pipeline: apm-agent-microbenchmark - waitFor: true - printBuildLogs: true - buildEnvVars: | + pipeline: "apm-agent-microbenchmark" + token: ${{ secrets.BUILDKITE_TOKEN }} + wait-for: false + env-vars: | script=.ci/bench.sh repo=apm-agent-python sha=${{ github.sha }} BRANCH_NAME=${{ github.ref_name }} - - - if: ${{ failure() }} - uses: elastic/apm-pipeline-library/.github/actions/slack-message@current - with: - url: ${{ secrets.VAULT_ADDR }} - roleId: ${{ secrets.VAULT_ROLE_ID }} - secretId: ${{ secrets.VAULT_SECRET_ID }} - channel: "#apm-agent-python" - message: | - :ghost: [${{ 
github.repository }}] microbenchmark *${{ github.ref_name }}* failed to run in Buildkite. - Build: (<${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|here>) diff --git a/.github/workflows/opentelemetry.yml b/.github/workflows/opentelemetry.yml deleted file mode 100644 index ea858e655..000000000 --- a/.github/workflows/opentelemetry.yml +++ /dev/null @@ -1,24 +0,0 @@ ---- -name: OpenTelemetry Export Trace - -on: - workflow_run: - workflows: - - pre-commit - - test - - test-reporter - - snapshoty - - release - - packages - - updatecli - types: [completed] - -jobs: - otel-export-trace: - runs-on: ubuntu-latest - steps: - - uses: elastic/apm-pipeline-library/.github/actions/opentelemetry@current - with: - vaultUrl: ${{ secrets.VAULT_ADDR }} - vaultRoleId: ${{ secrets.VAULT_ROLE_ID }} - vaultSecretId: ${{ secrets.VAULT_SECRET_ID }} diff --git a/.github/workflows/packages.yml b/.github/workflows/packages.yml index 148110c7f..cb4c268c7 100644 --- a/.github/workflows/packages.yml +++ b/.github/workflows/packages.yml @@ -13,25 +13,12 @@ on: - '**/*.md' - '**/*.asciidoc' +permissions: + contents: read + jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - - uses: actions/setup-python@v4 - with: - python-version: "3.10" - - name: Install wheel - run: pip install --user wheel - - name: Building universal wheel - run: python setup.py bdist_wheel - - name: Building source distribution - run: python setup.py sdist - - name: Upload Packages - uses: actions/upload-artifact@v3 - with: - name: packages - path: | - dist/*.whl - dist/*tar.gz - + - uses: actions/checkout@v6 + - uses: ./.github/actions/packages diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml index c2f7e71fc..4839430e8 100644 --- a/.github/workflows/pre-commit.yml +++ b/.github/workflows/pre-commit.yml @@ -5,10 +5,11 @@ on: push: branches: [main] +permissions: + contents: read + jobs: pre-commit: runs-on: ubuntu-latest steps: - - 
uses: actions/checkout@v3 - - uses: actions/setup-python@v3 - - uses: pre-commit/action@v3.0.0 + - uses: elastic/oblt-actions/pre-commit@v1 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 03a77ce47..1b39cddd9 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -4,16 +4,50 @@ on: push: tags: - "v*.*.*" + branches: + - main permissions: contents: read jobs: + build-distribution: + permissions: + artifact-metadata: write + attestations: write + contents: write + id-token: write + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + - uses: ./.github/actions/build-distribution + - name: generate build provenance + uses: actions/attest-build-provenance@v3 + with: + subject-path: "${{ github.workspace }}/build/dist/elastic-apm-python-lambda-layer.zip" + test: - uses: ./.github/workflows/test.yml + uses: ./.github/workflows/test-release.yml + needs: build-distribution + with: + full-matrix: true + enabled: ${{ startsWith(github.ref, 'refs/tags') }} + skip-build: true packages: - uses: ./.github/workflows/packages.yml + permissions: + artifact-metadata: write + attestations: write + contents: write + id-token: write + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + - uses: ./.github/actions/packages + - name: generate build provenance + uses: actions/attest-build-provenance@v3 + with: + subject-path: "${{ github.workspace }}/dist/*" publish-pypi: needs: @@ -24,99 +58,127 @@ jobs: permissions: id-token: write # IMPORTANT: this permission is mandatory for trusted publishing steps: - - uses: actions/checkout@v3 - - uses: actions/download-artifact@v3 + - uses: actions/checkout@v6 + - uses: actions/download-artifact@v7 with: name: packages path: dist - - name: Upload - uses: pypa/gh-action-pypi-publish@f5622bde02b04381239da3573277701ceca8f6a0 + - name: Upload pypi.org + if: startsWith(github.ref, 'refs/tags') + uses: pypa/gh-action-pypi-publish@ed0c53931b1dc9bd32cbe73a98c7f6766f8a527e # 
v1.13.0 with: repository-url: https://upload.pypi.org/legacy/ - - build-distribution: - uses: ./.github/workflows/build-distribution.yml + - name: Upload test.pypi.org + if: ${{ ! startsWith(github.ref, 'refs/tags') }} + uses: pypa/gh-action-pypi-publish@ed0c53931b1dc9bd32cbe73a98c7f6766f8a527e # v1.13.0 + with: + repository-url: https://test.pypi.org/legacy/ publish-lambda-layers: + permissions: + contents: read + id-token: write needs: - build-distribution runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - - uses: hashicorp/vault-action@v2.7.2 - with: - url: ${{ secrets.VAULT_ADDR }} - method: approle - roleId: ${{ secrets.VAULT_ROLE_ID }} - secretId: ${{ secrets.VAULT_SECRET_ID }} - secrets: | - secret/observability-team/ci/service-account/apm-agent-python access_key_id | AWS_ACCESS_KEY_ID ; - secret/observability-team/ci/service-account/apm-agent-python secret_access_key | AWS_SECRET_ACCESS_KEY - - uses: actions/download-artifact@v3 + - uses: actions/checkout@v6 + - uses: actions/download-artifact@v7 with: name: build-distribution path: ./build + - uses: elastic/oblt-actions/aws/auth@v1 + with: + aws-account-id: "267093732750" - name: Publish lambda layers to AWS + if: startsWith(github.ref, 'refs/tags') run: | # Convert v1.2.3 to ver-1-2-3 VERSION=${GITHUB_REF_NAME/v/ver-} VERSION=${VERSION//./-} ELASTIC_LAYER_NAME="elastic-apm-python-${VERSION}" .ci/publish-aws.sh - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v6 + if: startsWith(github.ref, 'refs/tags') with: name: arn-file - path: ".arn-file.md" + path: "${{ github.workspace }}/.arn-file.md" if-no-files-found: error publish-docker: needs: - build-distribution runs-on: ubuntu-latest + permissions: + artifact-metadata: write + attestations: write + contents: write + id-token: write + strategy: + fail-fast: false + matrix: + dockerfile: [ 'Dockerfile', 'Dockerfile.wolfi' ] + env: + DOCKER_IMAGE_NAME: docker.elastic.co/observability/apm-agent-python steps: - - uses: 
actions/checkout@v3 - - uses: elastic/apm-pipeline-library/.github/actions/docker-login@current + - uses: actions/checkout@v6 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0 + + - name: Log in to the Elastic Container registry + uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0 with: - registry: docker.elastic.co - secret: secret/observability-team/ci/docker-registry/prod - url: ${{ secrets.VAULT_ADDR }} - roleId: ${{ secrets.VAULT_ROLE_ID }} - secretId: ${{ secrets.VAULT_SECRET_ID }} - - uses: actions/download-artifact@v3 + registry: ${{ secrets.ELASTIC_DOCKER_REGISTRY }} + username: ${{ secrets.ELASTIC_DOCKER_USERNAME }} + password: ${{ secrets.ELASTIC_DOCKER_PASSWORD }} + + - uses: actions/download-artifact@v7 with: name: build-distribution path: ./build - - id: setup-docker - name: Set up docker variables - run: |- - # version without v prefix (e.g. 1.2.3) - echo "tag=${GITHUB_REF_NAME/v/}" >> "${GITHUB_OUTPUT}" - echo "name=docker.elastic.co/observability/apm-agent-python" >> "${GITHUB_OUTPUT}" - - name: Docker build - run: >- - docker build - -t ${{ steps.setup-docker.outputs.name }}:${{ steps.setup-docker.outputs.tag }} - --build-arg AGENT_DIR=./build/dist/package/python - . 
- - name: Docker retag - run: >- - docker tag - ${{ steps.setup-docker.outputs.name }}:${{ steps.setup-docker.outputs.tag }} - ${{ steps.setup-docker.outputs.name }}:latest - - name: Docker push - run: |- - docker push ${{ steps.setup-docker.outputs.name }}:${{ steps.setup-docker.outputs.tag }} - docker push ${{ steps.setup-docker.outputs.name }}:latest + + - name: Extract metadata (tags, labels) + id: docker-meta + uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0 + with: + images: ${{ env.DOCKER_IMAGE_NAME }} + tags: | + type=raw,value=latest,prefix=test-,enable={{is_default_branch}} + type=semver,pattern={{version}} + flavor: | + suffix=${{ contains(matrix.dockerfile, 'wolfi') && '-wolfi' || '' }} + + - name: Build and push image + id: docker-push + uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0 + with: + context: . + platforms: linux/amd64,linux/arm64 + push: true + file: ${{ matrix.dockerfile }} + tags: ${{ steps.docker-meta.outputs.tags }} + labels: ${{ steps.docker-meta.outputs.labels }} + build-args: | + AGENT_DIR=./build/dist/package/python + + - name: generate build provenance (containers) + uses: actions/attest-build-provenance@v3 + with: + subject-name: "${{ env.DOCKER_IMAGE_NAME }}" + subject-digest: ${{ steps.docker-push.outputs.digest }} + push-to-registry: true github-draft: permissions: contents: write needs: - publish-lambda-layers + if: startsWith(github.ref, 'refs/tags') runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - - uses: actions/download-artifact@v3 + - uses: actions/checkout@v6 + - uses: actions/download-artifact@v7 with: name: arn-file - name: Create GitHub Draft Release @@ -139,13 +201,12 @@ jobs: - github-draft steps: - id: check - uses: elastic/apm-pipeline-library/.github/actions/check-dependent-jobs@current + uses: elastic/oblt-actions/check-dependent-jobs@v1 with: - needs: ${{ toJSON(needs) }} - - uses: 
elastic/apm-pipeline-library/.github/actions/notify-build-status@current + jobs: ${{ toJSON(needs) }} + - if: startsWith(github.ref, 'refs/tags') + uses: elastic/oblt-actions/slack/notify-result@v1 with: + bot-token: ${{ secrets.SLACK_BOT_TOKEN }} + channel-id: "#apm-agent-python" status: ${{ steps.check.outputs.status }} - vaultUrl: ${{ secrets.VAULT_ADDR }} - vaultRoleId: ${{ secrets.VAULT_ROLE_ID }} - vaultSecretId: ${{ secrets.VAULT_SECRET_ID }} - slackChannel: "#apm-agent-python" diff --git a/.github/workflows/run-matrix.yml b/.github/workflows/run-matrix.yml index 811f68dd9..e14b0e6ea 100644 --- a/.github/workflows/run-matrix.yml +++ b/.github/workflows/run-matrix.yml @@ -8,6 +8,9 @@ on: description: Matrix include JSON string type: string +permissions: + contents: read + jobs: docker: name: "docker (version: ${{ matrix.version }}, framework: ${{ matrix.framework }})" @@ -17,21 +20,26 @@ jobs: max-parallel: 10 matrix: include: ${{ fromJSON(inputs.include) }} + env: + # These env variables are used in the docker-compose.yml and the run_tests.sh script. 
+ REGISTRY: ghcr.io + IMAGE_NAME: ${{ github.repository }}/apm-agent-python-testing steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v6 - name: Run tests run: ./tests/scripts/docker/run_tests.sh ${{ matrix.version }} ${{ matrix.framework }} env: LOCALSTACK_VOLUME_DIR: localstack_data - if: success() || failure() name: Upload JUnit Test Results - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v6 with: - name: test-results + name: test-results-${{ matrix.framework }}-${{ matrix.version }} path: "**/*-python-agent-junit.xml" - if: success() || failure() name: Upload Coverage Reports - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v6 with: - name: coverage-reports + name: coverage-reports-${{ matrix.framework }}-${{ matrix.version }} path: "**/.coverage*" + include-hidden-files: true diff --git a/.github/workflows/snapshoty.yml b/.github/workflows/snapshoty.yml deleted file mode 100644 index 3f91e2213..000000000 --- a/.github/workflows/snapshoty.yml +++ /dev/null @@ -1,35 +0,0 @@ ---- -# Publish a snapshot. A "snapshot" is a packaging of the latest *unreleased* APM agent, -# published to a known GCS bucket for use in edge demo/test environments. 
-name: snapshoty - -on: - workflow_run: - workflows: - - test - types: - - completed - branches: - - main - -jobs: - packages: - if: ${{ github.event.workflow_run.conclusion == 'success' }} - uses: ./.github/workflows/packages.yml - upload: - needs: - - packages - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - uses: actions/download-artifact@v3 - with: - name: packages - path: dist - - name: Publish snaphosts - uses: elastic/apm-pipeline-library/.github/actions/snapshoty-simple@current - with: - config: '.ci/snapshoty.yml' - vaultUrl: ${{ secrets.VAULT_ADDR }} - vaultRoleId: ${{ secrets.VAULT_ROLE_ID }} - vaultSecretId: ${{ secrets.VAULT_SECRET_ID }} diff --git a/.github/workflows/test-docs.yml b/.github/workflows/test-docs.yml index 86b24cc0c..1f28a567c 100644 --- a/.github/workflows/test-docs.yml +++ b/.github/workflows/test-docs.yml @@ -36,7 +36,7 @@ jobs: ENDOFFILE - if: success() || failure() name: Upload JUnit Test Results - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v6 with: - name: test-results + name: test-results-docs path: "docs-python-agent-junit.xml" diff --git a/.github/workflows/test-fips.yml b/.github/workflows/test-fips.yml new file mode 100644 index 000000000..4c77c98f9 --- /dev/null +++ b/.github/workflows/test-fips.yml @@ -0,0 +1,69 @@ + +# run test suite inside a FIPS 140 container +name: test-fips + +on: + workflow_dispatch: + schedule: + - cron: '0 4 * * 1' + +permissions: + contents: read + +jobs: + create-matrix: + runs-on: ubuntu-24.04 + outputs: + matrix: ${{ steps.generate.outputs.matrix }} + steps: + - uses: actions/checkout@v6 + - id: generate + uses: elastic/oblt-actions/version-framework@v1 + with: + versions-file: .ci/.matrix_python_fips.yml + frameworks-file: .ci/.matrix_framework_fips.yml + + test-fips: + needs: create-matrix + runs-on: ubuntu-24.04 + # https://docs.github.com/en/actions/writing-workflows/choosing-where-your-workflow-runs/running-jobs-in-a-container + # docker run -it --rm 
--name fipsy docker.elastic.co/wolfi/python-fips:3.12 + container: + image: docker.elastic.co/wolfi/python-fips:3.12-dev + options: --user root + credentials: + username: ${{ secrets.ELASTIC_DOCKER_USERNAME }} + password: ${{ secrets.ELASTIC_DOCKER_PASSWORD }} + timeout-minutes: 30 + strategy: + fail-fast: false + max-parallel: 10 + matrix: ${{ fromJSON(needs.create-matrix.outputs.matrix) }} + steps: + - uses: actions/checkout@v6 + - name: check that python has fips mode enabled + run: | + python3 -c 'import _hashlib; assert _hashlib.get_fips_mode() == 1' + - name: install run_tests.sh requirements + run: apk add netcat-openbsd tzdata + - name: Run tests + run: ./tests/scripts/run_tests.sh + env: + FRAMEWORK: ${{ matrix.framework }} + + notify-on-failure: + if: always() + runs-on: ubuntu-24.04 + needs: test-fips + steps: + - id: check + uses: elastic/oblt-actions/check-dependent-jobs@v1 + with: + jobs: ${{ toJSON(needs) }} + - name: Notify in Slack + if: steps.check.outputs.status == 'failure' + uses: elastic/oblt-actions/slack/notify-result@v1 + with: + bot-token: ${{ secrets.SLACK_BOT_TOKEN }} + status: ${{ steps.check.outputs.status }} + channel-id: "#apm-agent-python" diff --git a/.github/workflows/test-release.yml b/.github/workflows/test-release.yml new file mode 100644 index 000000000..43812eb94 --- /dev/null +++ b/.github/workflows/test-release.yml @@ -0,0 +1,51 @@ +name: test-release + +on: + workflow_call: + inputs: + full-matrix: + description: "Run the full matrix" + required: true + type: boolean + ref: + description: "The git ref of elastic/apm-agent-python to run test workflow from." 
+ required: false + type: string + enabled: + description: "Whether to run the workflow" + required: true + type: boolean + skip-build: + description: "Skip the build distribution step" + required: false + type: boolean + default: false + workflow_dispatch: + inputs: + full-matrix: + description: "Run the full matrix" + required: true + type: boolean + enabled: + description: "Whether to run the workflow" + required: true + type: boolean + skip-build: + description: "Skip the build distribution step" + required: false + type: boolean + default: false + +jobs: + test: + if: ${{ inputs.enabled }} + uses: ./.github/workflows/test.yml + with: + full-matrix: ${{ inputs.full-matrix }} + skip-build: ${{ inputs.skip-build }} + + run-if-disabled: + if: ${{ ! inputs.enabled }} + runs-on: ubuntu-latest + steps: + - run: echo "do something to help with the reusable workflows with needs" diff --git a/.github/workflows/test-reporter.yml b/.github/workflows/test-reporter.yml index 4b0b7620d..ffb1206a6 100644 --- a/.github/workflows/test-reporter.yml +++ b/.github/workflows/test-reporter.yml @@ -8,13 +8,18 @@ on: types: - completed +permissions: + contents: read + actions: read + checks: write + jobs: report: runs-on: ubuntu-latest steps: - - uses: elastic/apm-pipeline-library/.github/actions/test-report@current + - uses: elastic/oblt-actions/test-report@v1 with: - artifact: test-results - name: JUnit Tests + artifact: /test-results(.*)/ + name: 'Test Report $1' path: "**/*-python-agent-junit.xml" reporter: java-junit diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 363dec4a6..d82f75b27 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -1,7 +1,21 @@ name: test # The name must be the same as in test-docs.yml on: - workflow_call: ~ + workflow_call: + inputs: + full-matrix: + description: "Run the full matrix" + required: true + type: boolean + ref: + description: "The git ref of elastic/apm-agent-python to run test workflow from." 
+ required: false + type: string + skip-build: + description: "Skip the build distribution step" + required: false + type: boolean + default: false pull_request: paths-ignore: - "**/*.md" @@ -14,10 +28,29 @@ on: - "**/*.asciidoc" schedule: - cron: "0 2 * * *" + workflow_dispatch: + inputs: + full-matrix: + description: "Run the full matrix" + required: true + type: boolean + skip-build: + description: "Skip the build distribution step" + required: false + type: boolean + default: false + +permissions: + contents: read jobs: build-distribution: - uses: ./.github/workflows/build-distribution.yml + if: ${{ !inputs.skip-build }} + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + - uses: ./.github/actions/build-distribution + create-matrix: runs-on: ubuntu-latest @@ -26,15 +59,17 @@ jobs: data: ${{ steps.split.outputs.data }} chunks: ${{ steps.split.outputs.chunks }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v6 + with: + ref: ${{ inputs.ref || github.ref }} - id: generate - uses: elastic/apm-pipeline-library/.github/actions/version-framework@current + uses: elastic/oblt-actions/version-framework@v1 with: # Use .ci/.matrix_python_full.yml if it's a scheduled workflow, otherwise use .ci/.matrix_python.yml - versionsFile: .ci/.matrix_python${{ github.event_name == 'schedule' && '_full' || '' }}.yml + versions-file: .ci/.matrix_python${{ (github.event_name == 'schedule' || github.event_name == 'push' || inputs.full-matrix) && '_full' || '' }}.yml # Use .ci/.matrix_framework_full.yml if it's a scheduled workflow, otherwise use .ci/.matrix_framework.yml - frameworksFile: .ci/.matrix_framework${{ github.event_name == 'schedule' && '_full' || '' }}.yml - excludedFile: .ci/.matrix_exclude.yml + frameworks-file: .ci/.matrix_framework${{ (github.event_name == 'schedule' || github.event_name == 'push' || inputs.full-matrix) && '_full' || '' }}.yml + excluded-file: .ci/.matrix_exclude.yml - name: Split matrix shell: python id: split @@ -85,7 
+120,7 @@ jobs: windows: name: "windows (version: ${{ matrix.version }}, framework: ${{ matrix.framework }}, asyncio: ${{ matrix.asyncio }})" - runs-on: windows-2019 + runs-on: windows-2022 strategy: fail-fast: false matrix: @@ -107,8 +142,10 @@ jobs: FRAMEWORK: ${{ matrix.framework }} ASYNCIO: ${{ matrix.asyncio }} steps: - - uses: actions/checkout@v3 - - uses: actions/setup-python@v4 + - uses: actions/checkout@v6 + with: + ref: ${{ inputs.ref || github.ref }} + - uses: actions/setup-python@v6 with: python-version: ${{ matrix.version }} cache: pip @@ -119,16 +156,18 @@ jobs: run: .\scripts\run-tests.bat - if: success() || failure() name: Upload JUnit Test Results - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v6 with: - name: test-results + name: test-results-${{ matrix.framework }}-${{ matrix.version }}-asyncio-${{ matrix.asyncio }} path: "**/*-python-agent-junit.xml" + retention-days: 1 - if: success() || failure() name: Upload Coverage Reports - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v6 with: - name: coverage-reports + name: coverage-reports-${{ matrix.framework }}-${{ matrix.version }}-asyncio-${{ matrix.asyncio }} path: "**/.coverage*" + retention-days: 1 # This job is here to have a single status check that can be set as required. # https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idneeds # If a run contains a series of jobs that need each other, a failure applies to all jobs in the dependency chain from the point of failure onwards. 
@@ -142,7 +181,17 @@ jobs: - chunks-3 - windows steps: - - run: test $(echo '${{ toJSON(needs) }}' | jq -s 'map(.[].result) | all(.=="success")') = 'true' + - id: check + uses: elastic/oblt-actions/check-dependent-jobs@v1 + with: + jobs: ${{ toJSON(needs) }} + - run: ${{ steps.check.outputs.is-success }} + - if: failure() && (github.event_name == 'schedule' || github.event_name == 'push') + uses: elastic/oblt-actions/slack/notify-result@v1 + with: + bot-token: ${{ secrets.SLACK_BOT_TOKEN }} + status: ${{ steps.check.outputs.status }} + channel-id: "#apm-agent-python" coverage: name: Combine & check coverage. @@ -150,18 +199,21 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v6 + with: + ref: ${{ inputs.ref || github.ref }} - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v6 with: # Use latest Python, so it understands all syntax. python-version: 3.11 - run: python -Im pip install --upgrade coverage[toml] - - uses: actions/download-artifact@v3 + - uses: actions/download-artifact@v7 with: - name: coverage-reports + pattern: coverage-reports-* + merge-multiple: true - name: Combine coverage & fail if it's <84%. 
run: | @@ -175,10 +227,10 @@ jobs: python -Im coverage report --fail-under=84 - name: Upload HTML report - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v6 with: name: html-coverage-report path: htmlcov - - uses: geekyeggo/delete-artifact@54ab544f12cdb7b71613a16a2b5a37a9ade990af + - uses: geekyeggo/delete-artifact@f275313e70c08f6120db482d7a6b98377786765b # 5.1.0 with: - name: coverage-reports + name: coverage-reports-* diff --git a/.github/workflows/updatecli.yml b/.github/workflows/updatecli.yml index 2101ec798..e7d6fedf7 100644 --- a/.github/workflows/updatecli.yml +++ b/.github/workflows/updatecli.yml @@ -9,20 +9,46 @@ permissions: contents: read jobs: - bump: + compose: runs-on: ubuntu-latest + permissions: + contents: read + packages: read steps: - - uses: actions/checkout@v3 - - uses: elastic/apm-pipeline-library/.github/actions/updatecli@current + - uses: actions/checkout@v6 + + - name: Get token + id: get_token + uses: actions/create-github-app-token@v2 + with: + app-id: ${{ secrets.OBS_AUTOMATION_APP_ID }} + private-key: ${{ secrets.OBS_AUTOMATION_APP_PEM }} + permission-contents: write + permission-pull-requests: write + + - uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - uses: elastic/oblt-actions/updatecli/run@v1 + with: + command: --experimental compose diff + version-file: .tool-versions + env: + GITHUB_TOKEN: ${{ steps.get_token.outputs.token }} + + - uses: elastic/oblt-actions/updatecli/run@v1 with: - vaultUrl: ${{ secrets.VAULT_ADDR }} - vaultRoleId: ${{ secrets.VAULT_ROLE_ID }} - vaultSecretId: ${{ secrets.VAULT_SECRET_ID }} - pipeline: .ci/updatecli.d + command: --experimental compose apply + version-file: .tool-versions + env: + GITHUB_TOKEN: ${{ steps.get_token.outputs.token }} + - if: failure() - uses: elastic/apm-pipeline-library/.github/actions/notify-build-status@current + uses: 
elastic/oblt-actions/slack/send@v1 with: - vaultUrl: ${{ secrets.VAULT_ADDR }} - vaultRoleId: ${{ secrets.VAULT_ROLE_ID }} - vaultSecretId: ${{ secrets.VAULT_SECRET_ID }} - slackChannel: "#apm-agent-python" + bot-token: ${{ secrets.SLACK_BOT_TOKEN }} + channel-id: "#apm-agent-python" + message: ":traffic_cone: updatecli failed for `${{ github.repository }}@${{ github.ref_name }}`, @robots-ci please look what's going on " diff --git a/.gitignore b/.gitignore index 88e0a400b..12eae962b 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,7 @@ *.egg *.db *.pid +*.swp .coverage* .DS_Store .idea @@ -18,7 +19,6 @@ pip-log.txt /docs/doctrees /example_project/*.db tests/.schemacache -elasticapm/utils/wrapt/_wrappers*.so coverage .tox .eggs diff --git a/.hound.yml b/.hound.yml deleted file mode 100644 index 0745a960a..000000000 --- a/.hound.yml +++ /dev/null @@ -1,5 +0,0 @@ -flake8: - enabled: true - config_file: .flake8 - -fail_on_violations: true diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index f3341b37e..07f59f368 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -11,7 +11,7 @@ repos: language_version: python3 exclude: "(tests/utils/stacks/linenos.py|tests/utils/stacks/linenos2.py|tests/contrib/grpc/grpc_app/.*pb2.*.py)" - repo: https://github.com/PyCQA/flake8 - rev: 5.0.4 + rev: 6.1.0 hooks: - id: flake8 exclude: "(tests/utils/stacks/linenos.py|tests/utils/stacks/linenos2.py|tests/contrib/grpc/grpc_app/.*pb2.*.py)" diff --git a/.tool-versions b/.tool-versions new file mode 100644 index 000000000..4e6189ddb --- /dev/null +++ b/.tool-versions @@ -0,0 +1 @@ +updatecli v0.113.0 \ No newline at end of file diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index 97c5abdfc..5a6bed978 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -5,7 +5,7 @@ endif::[] //// [[release-notes-x.x.x]] -==== x.x.x - YYYY/MM/DD +==== x.x.x - YYYY-MM-DD [float] ===== Breaking changes @@ -29,24 +29,177 @@ endif::[] //===== Bug fixes // -=== 
Unreleased +[[release-notes-6.x]] +=== Python Agent version 6.x -// Unreleased changes go here -// When the next release happens, nest these changes under the "Python Agent version 6.x" heading -//[float] -//===== Features -// -//[float] -//===== Bug fixes +[[release-notes-6.25.0]] +==== 6.25.0 - 2025-12-23 [float] -===== Pending Deprecations +===== Features + +* Introduce `ELASTIC_APM_SKIP_SERVER_INFO` to reduce overhead on serverless with APM server 8.7.1+ {pull}2516[#2516] +* List all exported symbols in elasticapm module `__all__` {pull}2504[#2504] + +[float] +===== Bug fixes + +* Handle Tornado 6.5.3 `HttpHeaders` `in` operator behavior change {pull}2512[#2512] + +[[release-notes-6.24.1]] +==== 6.24.1 - 2025-11-21 + +[float] +===== Bug fixes + +* Fix handling of psycopg ServerCursor and AsyncServerCursor instrumentation {pull}2489[#2489] +* Fix contrib/opentelemetry set_status to match base signature {pull}2457[#2457] + +[[release-notes-6.24.0]] +==== 6.24.0 - 2025-08-12 + +[float] +===== Features + +* Add support for recent sanic versions {pull}2190[#2190], {pull}2194[#2194] +* Make server certificate verification mandatory in fips mode {pull}2227[#2227] +* Add support Python 3.13 {pull}2216[#2216] +* Add support for azure-data-tables package for azure instrumentation {pull}2187[#2187] +* Add span links from SNS messages {pull}2363[#2363] +[float] +===== Bug fixes + +* Fix psycopg2 cursor execute and executemany signatures {pull}2331[#2331] +* Fix psycopg cursor execute and executemany signatures {pull}2332[#2332] +* Fix asgi middleware distributed tracing {pull}2334[#2334] +* Fix typing of start in Span / capture_span to float {pull}2335[#2335] +* Fix azure instrumentation client_class and metrics sets invocation {pull}2337[#2337] +* Fix mysql_connector instrumentation connection retrieval {pull}2344[#2344] +* Remove spurious Django QuerySet evaluation in case of database errors {pull}2158[#2158] + +[[release-notes-6.23.0]] +==== 6.23.0 - 2024-07-30 + 
+[float] +===== Features + +* Make published Docker images multi-platform with the addition of linux/arm64 {pull}2080[#2080] + +[float] +===== Bug fixes + +* Fix handling consumer iteration if transaction not sampled in kafka instrumentation {pull}2075[#2075] +* Fix race condition with urllib3 at shutdown {pull}2085[#2085] +* Fix compatibility with setuptools>=72 that removed test command {pull}2090[#2090] + +===== Deprecations + +* Python 3.6 support will be removed in version 7.0.0 of the agent * The log shipping LoggingHandler will be removed in version 7.0.0 of the agent. +* The log shipping feature in the Flask instrumentation will be removed in version 7.0.0 of the agent. +* The log shipping feature in the Django instrumentation will be removed in version 7.0.0 of the agent. +* The OpenTracing bridge will be removed in version 7.0.0 of the agent. +* Celery 4.0 support is deprecated because it's not installable anymore with a modern pip +[[release-notes-6.22.3]] +==== 6.22.3 - 2024-06-10 -[[release-notes-6.x]] -=== Python Agent version 6.x +[float] +===== Bug fixes + +* Fix outcome in ASGI and Starlette apps on error status codes without an exception {pull}2060[#2060] + +[[release-notes-6.22.2]] +==== 6.22.2 - 2024-05-20 + +[float] +===== Bug fixes + +* Fix CI release workflow {pull}2046[#2046] + +[[release-notes-6.22.1]] +==== 6.22.1 - 2024-05-17 + +[float] +===== Features + +* Relax wrapt dependency to only exclude 1.15.0 {pull}2005[#2005] + +[[release-notes-6.22.0]] +==== 6.22.0 - 2024-04-03 + +[float] +===== Features + +* Add ability to override default JSON serialization {pull}2018[#2018] + +[[release-notes-6.21.4]] +==== 6.21.4 - 2024-03-19 + +[float] +===== Bug fixes + +* Fix urllib3 2.0.1+ crash with many args {pull}2002[#2002] + +[[release-notes-6.21.3]] +==== 6.21.3 - 2024-03-08 + +[float] +===== Bug fixes + +* Fix artifacts download in CI workflows {pull}1996[#1996] + +[[release-notes-6.21.2]] +==== 6.21.2 - 2024-03-07 + +[float] +===== Bug fixes + 
+* Fix artifacts upload in CI build-distribution workflow {pull}1993[#1993] + +[[release-notes-6.21.1]] +==== 6.21.1 - 2024-03-07 + +[float] +===== Bug fixes + +* Fix CI release workflow {pull}1990[#1990] + +[[release-notes-6.21.0]] +==== 6.21.0 - 2024-03-06 + +[float] +===== Bug fixes + +* Fix starlette middleware setup without client argument {pull}1952[#1952] +* Fix blocking of gRPC stream-to-stream requests {pull}1967[#1967] +* Always take into account body reading time for starlette requests {pull}1970[#1970] +* Make urllib3 transport tests more robust against local env {pull}1969[#1969] +* Clarify starlette integration documentation {pull}1956[#1956] +* Make dbapi2 query scanning for dollar quotes a bit more correct {pull}1976[#1976] +* Normalize headers in AWS Lambda integration on API Gateway v1 requests {pull}1982[#1982] + +[[release-notes-6.20.0]] +==== 6.20.0 - 2024-01-10 + +[float] +===== Features + +* Async support for dbapi2 (starting with psycopg) {pull}1944[#1944] +* Add object name to procedure call spans in dbapi2 {pull}1938[#1938] +* Add support for python 3.10 and 3.11 lambda runtimes + +[float] +===== Bug fixes + +* Fix asyncpg support for 0.29+ {pull}1935[#1935] +* Fix dbapi2 signature extraction to handle square brackets in table name {pull}1947[#1947] + +[float] +===== Pending Deprecations + +* The log shipping LoggingHandler will be removed in version 7.0.0 of the agent. [[release-notes-6.19.0]] ==== 6.19.0 - 2023-10-11 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f31f6c3c9..a7f6d0703 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -64,7 +64,7 @@ Once your changes are ready to submit for review: 1. Submit a pull request - Push your local changes to your forked copy of the repository and [submit a pull request](https://help.github.com/articles/using-pull-requests). 
+ Push your local changes to your forked copy of the repository and [submit a pull request](https://help.github.com/articles/using-pull-requests) to the `main` branch. In the pull request, choose a title which sums up the changes that you have made, and in the body provide more details about what your changes do. @@ -108,7 +108,7 @@ that is a fixture which is defined #### Adding new instrumentations to the matrix build For tests that require external dependencies like databases, or for testing different versions of the same library, -we use a matrix build that leverages Docker and docker-compose. +we use a matrix build that leverages Docker. The setup requires a little bit of boilerplate to get started. In this example, we will create an instrumentation for the "foo" database, by instrumenting its Python driver, `foodriver`. @@ -153,7 +153,7 @@ In this example, we will create an instrumentation for the "foo" database, by in image: foobase:latest You'll also have to add a `DOCKER_DEPS` environment variable to `tests/scripts/envs/foo.sh` which tells the matrix - to spin up the given docker-compose service before running your tests. + to spin up the given Docker compose service before running your tests. You may also need to add things like hostname configuration here. DOCKER_DEPS="foo" @@ -174,21 +174,30 @@ should "Squash and merge". ### Releasing +Release tags are signed so you need to have a PGP key set up, you can follow Github documentation on [creating a key](https://docs.github.com/en/authentication/managing-commit-signature-verification/generating-a-new-gpg-key) and +on [telling git about it](https://docs.github.com/en/authentication/managing-commit-signature-verification/telling-git-about-your-signing-key). Alternatively you can sign with an SSH key, remember you have to upload your key +again even if you want to use the same key you are using for authorization.
+Then make sure you have SSO figured out for the key you are using to push to github, see [Github documentation](https://docs.github.com/articles/authenticating-to-a-github-organization-with-saml-single-sign-on/). + If you have commit access, the process is as follows: 1. Update the version in `elasticapm/version.py` according to the scale of the change. (major, minor or patch) 1. Update `CHANGELOG.asciidoc`. Rename the `Unreleased` section to the correct version (`vX.X.X`), and nest under the appropriate sub-heading, e.g., `Python Agent version 5.x`. +1. Update `docs/release-notes/`. 1. For Majors: [Create an issue](https://github.com/elastic/website-requests/issues/new) to request an update of the [EOL table](https://www.elastic.co/support/eol). 1. For Majors: Add the new major version to `conf.yaml` in the [elastic/docs](https://github.com/elastic/docs) repo. 1. Commit changes with message `update CHANGELOG and bump version to X.Y.Z` where `X.Y.Z` is the version in `elasticapm/version.py` -1. Open a PR against `main` with these changes +1. Open a PR against `main` with these changes leaving the body empty 1. Once the PR is merged, fetch and checkout `upstream/main` 1. Tag the commit with `git tag -s vX.Y.Z`, for example `git tag -s v1.2.3`. Copy the changelog for the release to the tag message, removing any leading `#`. -1. Reset the current major branch (`1.x`, `2.x` etc) to point to the current main, e.g. `git branch -f 1.x main` 1. Push tag upstream with `git push upstream --tags` (and optionally to your own fork as well) -1. Update major branch, e.g. `1.x` on upstream with `git push upstream 1.x` +1. Open a PR from `main` to the major branch, e.g. `1.x` to update it. In order to keep history create a + branch from the `main` branch, rebase it on top of the major branch to drop duplicated commits and then + merge with the `rebase` strategy. It is crucial that `main` and the major branch have the same content. 1. 
After tests pass, Github Actions will automatically build and push the new release to PyPI. 1. Edit and publish the [draft Github release](https://github.com/elastic/apm-agent-python/releases) - created by Github Actions. Copy the changelog into the body of the release. + created by Github Actions. Substitute the generated changelog with one hand written into the body of the + release. +1. Update substitutions variables in `docs/reference/lambda-support.md`. diff --git a/Dockerfile b/Dockerfile index a4752936a..44dfeb72b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,3 @@ -# Pin to Alpine 3.17.3 -FROM alpine@sha256:124c7d2707904eea7431fffe91522a01e5a861a624ee31d03372cc1d138a3126 +FROM alpine@sha256:25109184c71bdad752c8312a8623239686a9a2071e8825f20acb8f2198c3f659 ARG AGENT_DIR COPY ${AGENT_DIR} /opt/python \ No newline at end of file diff --git a/Dockerfile.wolfi b/Dockerfile.wolfi new file mode 100644 index 000000000..be090b304 --- /dev/null +++ b/Dockerfile.wolfi @@ -0,0 +1,3 @@ +FROM docker.elastic.co/wolfi/chainguard-base:latest@sha256:17c8370b33443a247d2b60ca6b19c6f2fe81e4b90a2de3fa3e42665bcf7a346f +ARG AGENT_DIR +COPY ${AGENT_DIR} /opt/python \ No newline at end of file diff --git a/Makefile b/Makefile index 51f7a4eb6..90dcd4744 100644 --- a/Makefile +++ b/Makefile @@ -11,7 +11,7 @@ test: # delete any __pycache__ folders to avoid hard-to-debug caching issues find . 
-type f -name '*.py[co]' -delete -o -type d -name __pycache__ -delete # pypy3 should be added to the first `if` once it supports py3.7 - if [[ "$$PYTHON_VERSION" =~ ^(3.7|3.8|3.9|3.10|3.11|3.12|nightly)$$ ]] ; then \ + if [[ "$$PYTHON_VERSION" =~ ^(3.7|3.8|3.9|3.10|3.11|3.12|3.13|3.14|nightly)$$ ]] ; then \ echo "Python 3.7+, with asyncio"; \ pytest -v $(PYTEST_ARGS) --showlocals $(PYTEST_MARKER) $(PYTEST_JUNIT); \ else \ diff --git a/SECURITY.md b/SECURITY.md deleted file mode 100644 index 4ff826c5b..000000000 --- a/SECURITY.md +++ /dev/null @@ -1,7 +0,0 @@ -# Security Policy - -Thanks for your interest in the security of our products. -Our security policy can be found at [https://www.elastic.co/community/security](https://www.elastic.co/community/security). - -## Reporting a Vulnerability -Please send security vulnerability reports to security@elastic.co. diff --git a/dev-utils/make-packages.sh b/dev-utils/make-packages.sh new file mode 100755 index 000000000..27e63fcac --- /dev/null +++ b/dev-utils/make-packages.sh @@ -0,0 +1,12 @@ +#!/bin/bash +# +# Make a Python APM agent distribution +# + +echo "::group::Install build" +pip install --user build +echo "::endgroup::" + +echo "::group::Building packages" +python -m build +echo "::endgroup::" diff --git a/dev-utils/requirements.txt b/dev-utils/requirements.txt index 353f9d8dc..8b112eafa 100644 --- a/dev-utils/requirements.txt +++ b/dev-utils/requirements.txt @@ -1,4 +1,4 @@ # These are the pinned requirements for the lambda layer/docker image -certifi==2023.7.22 -urllib3==1.26.18 +certifi==2026.1.4 +urllib3==1.26.20 wrapt==1.14.1 diff --git a/docs/advanced-topics.asciidoc b/docs/advanced-topics.asciidoc deleted file mode 100644 index f2aa3c0d3..000000000 --- a/docs/advanced-topics.asciidoc +++ /dev/null @@ -1,13 +0,0 @@ -[[advanced-topics]] -== Advanced Topics - -* <> -* <> -* <> -* <> - -include::./custom-instrumentation.asciidoc[Custom Instrumentation] -include::./sanitizing-data.asciidoc[Sanitizing Data] 
-include::./how-the-agent-works.asciidoc[How the Agent works] -include::./run-tests-locally.asciidoc[Run Tests Locally] - diff --git a/docs/aiohttp-server.asciidoc b/docs/aiohttp-server.asciidoc deleted file mode 100644 index 357aa79b3..000000000 --- a/docs/aiohttp-server.asciidoc +++ /dev/null @@ -1,124 +0,0 @@ -[[aiohttp-server-support]] -=== Aiohttp Server support - -Getting Elastic APM set up for your Aiohttp Server project is easy, -and there are various ways you can tweak it to fit to your needs. - -[float] -[[aiohttp-server-installation]] -==== Installation - -Install the Elastic APM agent using pip: - -[source,bash] ----- -$ pip install elastic-apm ----- - -or add `elastic-apm` to your project's `requirements.txt` file. - - -[float] -[[aiohttp-server-setup]] -==== Setup - -To set up the agent, you need to initialize it with appropriate settings. - -The settings are configured either via environment variables, -the application's settings, or as initialization arguments. - -You can find a list of all available settings in the <> page. - -To initialize the agent for your application using environment variables: - -[source,python] ----- -from aiohttp import web - -from elasticapm.contrib.aiohttp import ElasticAPM - -app = web.Application() - -apm = ElasticAPM(app) ----- - -To configure the agent using `ELASTIC_APM` in your application's settings: - -[source,python] ----- -from aiohttp import web - -from elasticapm.contrib.aiohttp import ElasticAPM - -app = web.Application() - -app['ELASTIC_APM'] = { - 'SERVICE_NAME': '', - 'SECRET_TOKEN': '', -} -apm = ElasticAPM(app) ----- - -[float] -[[aiohttp-server-usage]] -==== Usage - -Once you have configured the agent, -it will automatically track transactions and capture uncaught exceptions within aiohttp. 
- -Capture an arbitrary exception by calling <>: - -[source,python] ----- -try: - 1 / 0 -except ZeroDivisionError: - apm.client.capture_exception() ----- - -Log a generic message with <>: - -[source,python] ----- -apm.client.capture_message('hello, world!') ----- - -[float] -[[aiohttp-server-performance-metrics]] -==== Performance metrics - -If you've followed the instructions above, the agent has already installed our middleware. -This will measure response times, as well as detailed performance data for all supported technologies. - -NOTE: due to the fact that `asyncio` drivers are usually separate from their synchronous counterparts, -specific instrumentation is needed for all drivers. -The support for asynchronous drivers is currently quite limited. - -[float] -[[aiohttp-server-ignoring-specific-views]] -===== Ignoring specific routes - -You can use the <> configuration option to ignore specific routes. -The list given should be a list of regular expressions which are matched against the transaction name: - -[source,python] ----- -app['ELASTIC_APM'] = { - # ... - 'TRANSACTIONS_IGNORE_PATTERNS': ['^OPTIONS ', '/api/'] - # ... -} ----- - -This would ignore any requests using the `OPTIONS` method -and any requests containing `/api/`. - - - -[float] -[[supported-aiohttp-and-python-versions]] -==== Supported aiohttp and Python versions - -A list of supported <> and <> versions can be found on our <> page. - -NOTE: Elastic APM only supports `asyncio` when using Python 3.7+ diff --git a/docs/api.asciidoc b/docs/api.asciidoc deleted file mode 100644 index 327ddee71..000000000 --- a/docs/api.asciidoc +++ /dev/null @@ -1,551 +0,0 @@ -[[api]] -== API reference - -The Elastic APM Python agent has several public APIs. -Most of the public API functionality is not needed when using one of our <>, -but they allow customized usage. - -[float] -[[client-api]] -=== Client API - -The public Client API consists of several methods on the `Client` class. 
-This API can be used to track exceptions and log messages, -as well as to mark the beginning and end of transactions. - -[float] -[[client-api-init]] -==== Instantiation - -[small]#Added in v1.0.0.# - -To create a `Client` instance, import it and call its constructor: - -[source,python] ----- -from elasticapm import Client - -client = Client({'SERVICE_NAME': 'example'}, **defaults) ----- - - * `config`: A dictionary, with key/value configuration. For the possible configuration keys, see <>. - * `**defaults`: default values for configuration. These can be omitted in most cases, and take the least precedence. - -NOTE: framework integrations like <> and <> -instantiate the client automatically. - -[float] -[[api-get-client]] -===== `elasticapm.get_client()` - -[small]#Added in v6.1.0. - -Retrieves the `Client` singleton. This is useful for many framework integrations, -where the client is instantiated automatically. - -[source,python] ----- -client = elasticapm.get_client() -client.capture_message('foo') ----- - -[float] -[[error-api]] -==== Errors - -[float] -[[client-api-capture-exception]] -===== `Client.capture_exception()` - -[small]#Added in v1.0.0. `handled` added in v2.0.0.# - -Captures an exception object: - -[source,python] ----- -try: - x = int("five") -except ValueError: - client.capture_exception() ----- - - * `exc_info`: A `(type, value, traceback)` tuple as returned by https://docs.python.org/3/library/sys.html#sys.exc_info[`sys.exc_info()`]. If not provided, it will be captured automatically. - * `date`: A `datetime.datetime` object representing the occurrence time of the error. If left empty, it defaults to `datetime.datetime.utcnow()`. - * `context`: A dictionary with contextual information. This dictionary must follow the - {apm-guide-ref}/api-error.html[Context] schema definition. - * `custom`: A dictionary of custom data you want to attach to the event. - * `handled`: A boolean to indicate if this exception was handled or not. 
- -Returns the id of the error as a string. - -[float] -[[client-api-capture-message]] -===== `Client.capture_message()` - -[small]#Added in v1.0.0.# - -Captures a message with optional added contextual data. Example: - -[source,python] ----- -client.capture_message('Billing process succeeded.') ----- - - * `message`: The message as a string. - * `param_message`: Alternatively, a parameterized message as a dictionary. - The dictionary contains two values: `message`, and `params`. - This allows the APM Server to group messages together that share the same - parameterized message. Example: -+ -[source,python] ----- -client.capture_message(param_message={ - 'message': 'Billing process for %s succeeded. Amount: %s', - 'params': (customer.id, order.total_amount), -}) ----- -+ - * `stack`: If set to `True` (the default), a stacktrace from the call site will be captured. - * `exc_info`: A `(type, value, traceback)` tuple as returned by - https://docs.python.org/3/library/sys.html#sys.exc_info[`sys.exc_info()`]. - If not provided, it will be captured automatically, if `capture_message()` was called in an `except` block. - * `date`: A `datetime.datetime` object representing the occurrence time of the error. - If left empty, it defaults to `datetime.datetime.utcnow()`. - * `context`: A dictionary with contextual information. This dictionary must follow the - {apm-guide-ref}/api-error.html[Context] schema definition. - * `custom`: A dictionary of custom data you want to attach to the event. - -Returns the id of the message as a string. - -NOTE: Either the `message` or the `param_message` argument is required. - -[float] -[[transaction-api]] -==== Transactions - -[float] -[[client-api-begin-transaction]] -===== `Client.begin_transaction()` - -[small]#Added in v1.0.0. `trace_parent` support added in v5.6.0.# - -Begin tracking a transaction. -Should be called e.g. at the beginning of a request or when starting a background task. 
Example: - -[source,python] ----- -client.begin_transaction('processors') ----- - - * `transaction_type`: (*required*) A string describing the type of the transaction, e.g. `'request'` or `'celery'`. - * `trace_parent`: (*optional*) A `TraceParent` object. See <>. - * `links`: (*optional*) A list of `TraceParent` objects to which this transaction is causally linked. - -[float] -[[client-api-end-transaction]] -===== `Client.end_transaction()` - -[small]#Added in v1.0.0.# - -End tracking the transaction. -Should be called e.g. at the end of a request or when ending a background task. Example: - -[source,python] ----- -client.end_transaction('myapp.billing_process', processor.status) ----- - - * `name`: (*optional*) A string describing the name of the transaction, e.g. `process_order`. - This is typically the name of the view/controller that handles the request, or the route name. - * `result`: (*optional*) A string describing the result of the transaction. - This is typically the HTTP status code, or e.g. `'success'` for a background task. - -NOTE: if `name` and `result` are not set in the `end_transaction()` call, -they have to be set beforehand by calling <> and <> during the transaction. - -[float] -[[traceparent-api]] -==== `TraceParent` - -Transactions can be started with a `TraceParent` object. This creates a -transaction that is a child of the `TraceParent`, which is essential for -distributed tracing. - -[float] -[[api-traceparent-from-string]] -===== `elasticapm.trace_parent_from_string()` - -[small]#Added in v5.6.0.# - -Create a `TraceParent` object from the string representation generated by -`TraceParent.to_string()`: - -[source,python] ----- -parent = elasticapm.trace_parent_from_string('00-03d67dcdd62b7c0f7a675424347eee3a-5f0e87be26015733-01') -client.begin_transaction('processors', trace_parent=parent) ----- - - * `traceparent_string`: (*required*) A string representation of a `TraceParent` object. 
- - -[float] -[[api-traceparent-from-headers]] -===== `elasticapm.trace_parent_from_headers()` - -[small]#Added in v5.6.0.# - -Create a `TraceParent` object from HTTP headers (usually generated by another -Elastic APM agent): - -[source,python] ----- -parent = elasticapm.trace_parent_from_headers(headers_dict) -client.begin_transaction('processors', trace_parent=parent) ----- - - * `headers`: (*required*) HTTP headers formed as a dictionary. - -[float] -[[api-traceparent-get-header]] -===== `elasticapm.get_trace_parent_header()` - -[small]#Added in v5.10.0.# - -Return the string representation of the current transaction `TraceParent` object: - -[source,python] ----- -elasticapm.get_trace_parent_header() ----- - -[float] -[[api-other]] -=== Other APIs - -[float] -[[api-elasticapm-instrument]] -==== `elasticapm.instrument()` - -[small]#Added in v1.0.0.# - -Instruments libraries automatically. -This includes a wide range of standard library and 3rd party modules. -A list of instrumented modules can be found in `elasticapm.instrumentation.register`. -This function should be called as early as possibly in the startup of your application. -For <>, this is called automatically. Example: - -[source,python] ----- -import elasticapm - -elasticapm.instrument() ----- - -[float] -[[api-set-transaction-name]] -==== `elasticapm.set_transaction_name()` - -[small]#Added in v1.0.0.# - -Set the name of the current transaction. -For supported frameworks, the transaction name is determined automatically, -and can be overridden using this function. Example: - -[source,python] ----- -import elasticapm - -elasticapm.set_transaction_name('myapp.billing_process') ----- - - * `name`: (*required*) A string describing name of the transaction - * `override`: if `True` (the default), overrides any previously set transaction name. - If `False`, only sets the name if the transaction name hasn't already been set. 
- -[float] -[[api-set-transaction-result]] -==== `elasticapm.set_transaction_result()` - -[small]#Added in v2.2.0.# - -Set the result of the current transaction. -For supported frameworks, the transaction result is determined automatically, -and can be overridden using this function. Example: - -[source,python] ----- -import elasticapm - -elasticapm.set_transaction_result('SUCCESS') ----- - - * `result`: (*required*) A string describing the result of the transaction, e.g. `HTTP 2xx` or `SUCCESS` - * `override`: if `True` (the default), overrides any previously set result. - If `False`, only sets the result if the result hasn't already been set. - -[float] -[[api-set-transaction-outcome]] -==== `elasticapm.set_transaction_outcome()` - -[small]#Added in v5.9.0.# - -Sets the outcome of the transaction. The value can either be `"success"`, `"failure"` or `"unknown"`. -This should only be called at the end of a transaction after the outcome is determined. - -The `outcome` is used for error rate calculations. -`success` denotes that a transaction has concluded successful, while `failure` indicates that the transaction failed -to finish successfully. -If the `outcome` is set to `unknown`, the transaction will not be included in error rate calculations. - -For supported web frameworks, the transaction outcome is set automatically if it has not been set yet, based on the -HTTP status code. -A status code below `500` is considered a `success`, while any value of `500` or higher is counted as a `failure`. - -If your transaction results in an HTTP response, you can alternatively provide the HTTP status code. - -NOTE: While the `outcome` and `result` field look very similar, they serve different purposes. - Other than the `result` field, which canhold an arbitrary string value, - `outcome` is limited to three different values, - `"success"`, `"failure"` and `"unknown"`. - This allows the APM app to perform error rate calculations on these values. 
- -Example: - -[source,python] ----- -import elasticapm - -elasticapm.set_transaction_outcome("success") - -# Using an HTTP status code -elasticapm.set_transaction_outcome(http_status_code=200) - -# Using predefined constants: - -from elasticapm.conf.constants import OUTCOME - -elasticapm.set_transaction_outcome(OUTCOME.SUCCESS) -elasticapm.set_transaction_outcome(OUTCOME.FAILURE) -elasticapm.set_transaction_outcome(OUTCOME.UNKNOWN) ----- - - * `outcome`: One of `"success"`, `"failure"` or `"unknown"`. Can be omitted if `http_status_code` is provided. - * `http_status_code`: if the transaction represents an HTTP response, its status code can be provided to determine the `outcome` automatically. - * `override`: if `True` (the default), any previously set `outcome` will be overriden. - If `False`, the outcome will only be set if it was not set before. - - -[float] -[[api-get-transaction-id]] -==== `elasticapm.get_transaction_id()` - -[small]#Added in v5.2.0.# - -Get the id of the current transaction. Example: - -[source,python] ----- -import elasticapm - -transaction_id = elasticapm.get_transaction_id() ----- - - -[float] -[[api-get-trace-id]] -==== `elasticapm.get_trace_id()` - -[small]#Added in v5.2.0.# - -Get the `trace_id` of the current transaction's trace. Example: - -[source,python] ----- -import elasticapm - -trace_id = elasticapm.get_trace_id() ----- - - -[float] -[[api-get-span-id]] -==== `elasticapm.get_span_id()` - -[small]#Added in v5.2.0.# - -Get the id of the current span. Example: - -[source,python] ----- -import elasticapm - -span_id = elasticapm.get_span_id() ----- - - -[float] -[[api-set-custom-context]] -==== `elasticapm.set_custom_context()` - -[small]#Added in v2.0.0.# - -Attach custom contextual data to the current transaction and errors. -Supported frameworks will automatically attach information about the HTTP request and the logged in user. -You can attach further data using this function. 
- -TIP: Before using custom context, ensure you understand the different types of -{apm-guide-ref}/data-model-metadata.html[metadata] that are available. - -Example: - -[source,python] ----- -import elasticapm - -elasticapm.set_custom_context({'billing_amount': product.price * item_count}) ----- - - * `data`: (*required*) A dictionary with the data to be attached. This should be a flat key/value `dict` object. - -NOTE: `.`, `*`, and `"` are invalid characters for key names and will be replaced with `_`. - - -Errors that happen after this call will also have the custom context attached to them. -You can call this function multiple times, new context data will be merged with existing data, -following the `update()` semantics of Python dictionaries. - -[float] -[[api-set-user-context]] -==== `elasticapm.set_user_context()` - -[small]#Added in v2.0.0.# - -Attach information about the currently logged in user to the current transaction and errors. -Example: - -[source,python] ----- -import elasticapm - -elasticapm.set_user_context(username=user.username, email=user.email, user_id=user.id) ----- - - * `username`: The username of the logged in user - * `email`: The email of the logged in user - * `user_id`: The unique identifier of the logged in user, e.g. the primary key value - -Errors that happen after this call will also have the user context attached to them. -You can call this function multiple times, new user data will be merged with existing data, -following the `update()` semantics of Python dictionaries. - - -[float] -[[api-capture-span]] -==== `elasticapm.capture_span` - -[small]#Added in v4.1.0.# - -Capture a custom span. -This can be used either as a function decorator or as a context manager (in a `with` statement). -When used as a decorator, the name of the span will be set to the name of the function. -When used as a context manager, a name has to be provided. 
- -[source,python] ----- -import elasticapm - -@elasticapm.capture_span() -def coffee_maker(strength): - fetch_water() - - with elasticapm.capture_span('near-to-machine', labels={"type": "arabica"}): - insert_filter() - for i in range(strength): - pour_coffee() - - start_drip() - - fresh_pots() ----- - - * `name`: The name of the span. Defaults to the function name if used as a decorator. - * `span_type`: (*optional*) The type of the span, usually in a dot-separated hierarchy of `type`, `subtype`, and `action`, e.g. `db.mysql.query`. Alternatively, type, subtype and action can be provided as three separate arguments, see `span_subtype` and `span_action`. - * `skip_frames`: (*optional*) The number of stack frames to skip when collecting stack traces. Defaults to `0`. - * `leaf`: (*optional*) if `True`, all spans nested below this span will be ignored. Defaults to `False`. - * `labels`: (*optional*) a dictionary of labels. Keys must be strings, values can be strings, booleans, or numerical (`int`, `float`, `decimal.Decimal`). Defaults to `None`. - * `span_subtype`: (*optional*) subtype of the span, e.g. name of the database. Defaults to `None`. - * `span_action`: (*optional*) action of the span, e.g. `query`. Defaults to `None`. - * `links`: (*optional*) A list of `TraceParent` objects to which this span is causally linked. - - -[float] -[[api-async-capture-span]] -==== `elasticapm.async_capture_span` - -[small]#Added in v5.4.0.# - -Capture a custom async-aware span. -This can be used either as a function decorator or as a context manager (in an `async with` statement). -When used as a decorator, the name of the span will be set to the name of the function. -When used as a context manager, a name has to be provided. 
- -[source,python] ----- -import elasticapm - -@elasticapm.async_capture_span() -async def coffee_maker(strength): - await fetch_water() - - async with elasticapm.async_capture_span('near-to-machine', labels={"type": "arabica"}): - await insert_filter() - async for i in range(strength): - await pour_coffee() - - start_drip() - - fresh_pots() ----- - - * `name`: The name of the span. Defaults to the function name if used as a decorator. - * `span_type`: (*optional*) The type of the span, usually in a dot-separated hierarchy of `type`, `subtype`, and `action`, e.g. `db.mysql.query`. Alternatively, type, subtype and action can be provided as three separate arguments, see `span_subtype` and `span_action`. - * `skip_frames`: (*optional*) The number of stack frames to skip when collecting stack traces. Defaults to `0`. - * `leaf`: (*optional*) if `True`, all spans nested below this span will be ignored. Defaults to `False`. - * `labels`: (*optional*) a dictionary of labels. Keys must be strings, values can be strings, booleans, or numerical (`int`, `float`, `decimal.Decimal`). Defaults to `None`. - * `span_subtype`: (*optional*) subtype of the span, e.g. name of the database. Defaults to `None`. - * `span_action`: (*optional*) action of the span, e.g. `query`. Defaults to `None`. - * `links`: (*optional*) A list of `TraceParent` objects to which this span is causally linked. - -NOTE: `asyncio` is only supported for Python 3.7+. - -[float] -[[api-label]] -==== `elasticapm.label()` - -[small]#Added in v5.0.0.# - -Attach labels to the current transaction and errors. - -TIP: Before using custom labels, ensure you understand the different types of -{apm-guide-ref}/data-model-metadata.html[metadata] that are available. - -Example: - -[source,python] ----- -import elasticapm - -elasticapm.label(ecommerce=True, dollar_value=47.12) ----- - -Errors that happen after this call will also have the labels attached to them. 
-You can call this function multiple times, new labels will be merged with existing labels, -following the `update()` semantics of Python dictionaries. - -Keys must be strings, values can be strings, booleans, or numerical (`int`, `float`, `decimal.Decimal`) -`.`, `*`, and `"` are invalid characters for label names and will be replaced with `_`. - -WARNING: Avoid defining too many user-specified labels. -Defining too many unique fields in an index is a condition that can lead to a -{ref}/mapping.html#mapping-limit-settings[mapping explosion]. diff --git a/docs/asgi-middleware.asciidoc b/docs/asgi-middleware.asciidoc deleted file mode 100644 index 75607d8bc..000000000 --- a/docs/asgi-middleware.asciidoc +++ /dev/null @@ -1,61 +0,0 @@ -[[asgi-middleware]] -=== ASGI Middleware - -experimental::[] - -Incorporating Elastic APM into your ASGI-based project only requires a few easy -steps. - -NOTE: Several ASGI frameworks are supported natively. -Please check <> for more information - -[float] -[[asgi-installation]] -==== Installation - -Install the Elastic APM agent using pip: - -[source,bash] ----- -$ pip install elastic-apm ----- - -or add `elastic-apm` to your project's `requirements.txt` file. - - -[float] -[[asgi-setup]] -==== Setup - -To set up the agent, you need to initialize it with appropriate settings. - -The settings are configured either via environment variables, or as -initialization arguments. - -You can find a list of all available settings in the -<> page. - -To set up the APM agent, wrap your ASGI app with the `ASGITracingMiddleware`: - -[source,python] ----- -from elasticapm.contrib.asgi import ASGITracingMiddleware - -app = MyGenericASGIApp() # depending on framework - -app = ASGITracingMiddleware(app) - ----- - -Make sure to call <> with an appropriate transaction name in all your routes. - -NOTE: Currently, the agent doesn't support automatic capturing of exceptions. 
-You can follow progress on this issue on https://github.com/elastic/apm-agent-python/issues/1548[Github]. - -[float] -[[supported-python-versions]] -==== Supported Python versions - -A list of supported <> versions can be found on our <> page. - -NOTE: Elastic APM only supports `asyncio` when using Python 3.7+ diff --git a/docs/configuration.asciidoc b/docs/configuration.asciidoc deleted file mode 100644 index 112d4ca3e..000000000 --- a/docs/configuration.asciidoc +++ /dev/null @@ -1,1387 +0,0 @@ -[[configuration]] -== Configuration - -To adapt the Elastic APM agent to your needs, configure it using environment variables or framework-specific -configuration. - -You can either configure the agent by setting environment variables: -[source,bash] ----- -ELASTIC_APM_SERVICE_NAME=foo python manage.py runserver ----- - -or with inline configuration: - -[source,python] ----- -apm_client = Client(service_name="foo") ----- - -or by using framework specific configuration e.g. in your Django `settings.py` file: - -[source,python] ----- -ELASTIC_APM = { - "SERVICE_NAME": "foo", -} ----- - -The precedence is as follows: - - * <> -(supported options are marked with <>) - * Environment variables - * Inline configuration - * Framework-specific configuration - * Default value - -[float] -[[dynamic-configuration]] -=== Dynamic configuration - -Configuration options marked with the image:./images/dynamic-config.svg[] badge can be changed at runtime -when set from a supported source. - -The Python Agent supports {apm-app-ref}/agent-configuration.html[Central configuration], -which allows you to fine-tune certain configurations from in the APM app. -This feature is enabled in the Agent by default with <>. 
- -[float] -[[django-configuration]] -=== Django - -To configure Django, add an `ELASTIC_APM` dictionary to your `settings.py`: - -[source,python] ----- -ELASTIC_APM = { - 'SERVICE_NAME': 'my-app', - 'SECRET_TOKEN': 'changeme', -} ----- - -[float] -[[flask-configuration]] -=== Flask - -To configure Flask, add an `ELASTIC_APM` dictionary to your `app.config`: - -[source,python] ----- -app.config['ELASTIC_APM'] = { - 'SERVICE_NAME': 'my-app', - 'SECRET_TOKEN': 'changeme', -} - -apm = ElasticAPM(app) ----- - -[float] -[[core-options]] -=== Core options - -[float] -[[config-service-name]] -==== `service_name` - -[options="header"] -|============ -| Environment | Django/Flask | Default | Example -| `ELASTIC_APM_SERVICE_NAME` | `SERVICE_NAME` | `unknown-python-service` | `my-app` -|============ - - -The name of your service. -This is used to keep all the errors and transactions of your service together -and is the primary filter in the Elastic APM user interface. - -While a default is provided, it is essential that you override this default -with something more descriptive and unique across your infrastructure. - -NOTE: The service name must conform to this regular expression: `^[a-zA-Z0-9 _-]+$`. -In other words, the service name must only contain characters from the ASCII -alphabet, numbers, dashes, underscores, and spaces. It cannot be an empty string -or whitespace-only. - -[float] -[[config-server-url]] -==== `server_url` - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_SERVER_URL` | `SERVER_URL` | `'http://127.0.0.1:8200'` -|============ - -The URL for your APM Server. -The URL must be fully qualified, including protocol (`http` or `https`) and port. -Note: Do not set this if you are using APM in an AWS lambda function. APM Agents are designed to proxy their calls to the APM Server through the lambda extension. Instead, set `ELASTIC_APM_LAMBDA_APM_SERVER`. For more info, see <>. 
- -[float] -[[config-enabled]] -==== `enabled` - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_ENABLED` | `ENABLED` | `true` -|============ - -Enable or disable the agent. -When set to false, the agent will not collect any data or start any background threads. - - -[float] -[[config-recording]] -==== `recording` - -<> - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_RECORDING` | `RECORDING` | `true` -|============ - -Enable or disable recording of events. -If set to false, then the Python agent does not send any events to the Elastic APM server, -and instrumentation overhead is minimized. The agent will continue to poll the server for configuration changes. - - -[float] -[[logging-options]] -=== Logging Options - -[float] -[[config-log_level]] -==== `log_level` - -<> - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_LOG_LEVEL` | `LOG_LEVEL` | -|============ - -The `logging.logLevel` at which the `elasticapm` logger will log. The available -options are: - -* `"off"` (sets `logging.logLevel` to 1000) -* `"critical"` -* `"error"` -* `"warning"` -* `"info"` -* `"debug"` -* `"trace"` (sets `logging.log_level` to 5) - -Options are case-insensitive. - -Note that this option doesn't do anything with logging handlers. In order -for any logs to be visible, you must either configure a handler -(https://docs.python.org/3/library/logging.html#logging.basicConfig[`logging.basicConfig`] -will do this for you) or set <>. This will also override -any log level your app has set for the `elasticapm` logger. - -[float] -[[config-log_file]] -==== `log_file` - -[options="header"] -|============ -| Environment | Django/Flask | Default | Example -| `ELASTIC_APM_LOG_FILE` | `LOG_FILE` | `""` | `"/var/log/elasticapm/log.txt"` -|============ - -This enables the agent to log to a file. This is disabled by default. 
The agent will log -at the `logging.logLevel` configured with <>. Use -<> to configure the maximum size of the log file. This log -file will automatically rotate. - -Note that setting <> is required for this setting to do -anything. - -If https://github.com/elastic/ecs-logging-python[`ecs_logging`] is installed, -the logs will automatically be formatted as ecs-compatible json. - -[float] -[[config-log_file_size]] -==== `log_file_size` - -[options="header"] -|============ -| Environment | Django/Flask | Default | Example -| `ELASTIC_APM_LOG_FILE_SIZE` | `LOG_FILE_SIZE` | `"50mb"` | `"100mb"` -|============ - -The size of the log file if <> is set. - -The agent always keeps one backup file when rotating, so the maximum space that -the log files will consume is twice the value of this setting. - -[float] -[[config-log_ecs_reformatting]] -==== `log_ecs_reformatting` - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_LOG_ECS_REFORMATTING` | `LOG_ECS_REFORMATTING` | `"off"` -|============ - -experimental::[] - -Valid options: - -* `"off"` -* `"override"` - -If https://github.com/elastic/ecs-logging-python[`ecs_logging`] is installed, -setting this to `"override"` will cause the agent to automatically attempt to enable -ecs-formatted logging. - -For base `logging` from the standard library, the agent will get the root -logger, find any attached handlers, and for each, set the formatter to -`ecs_logging.StdlibFormatter()`. - -If `structlog` is installed, the agent will override any configured processors -with `ecs_logging.StructlogFormatter()`. - -Note that this is a very blunt instrument that could have unintended side effects. -If problems arise, please apply these formatters manually and leave this setting -as `"off"`. See the -https://www.elastic.co/guide/en/ecs-logging/python/current/installation.html[`ecs_logging` docs] -for more information about using these formatters. 
- -Also note that this setting does not facilitate shipping logs to Elasticsearch. -We recommend https://www.elastic.co/beats/filebeat[Filebeat] for that purpose. - -[float] -[[other-options]] -=== Other options - -[float] -[[config-transport-class]] -==== `transport_class` - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_TRANSPORT_CLASS` | `TRANSPORT_CLASS` | `elasticapm.transport.http.Transport` -|============ - - -The transport class to use when sending events to the APM Server. - -[float] -[[config-service-node-name]] -==== `service_node_name` - -[options="header"] -|============ -| Environment | Django/Flask | Default | Example -| `ELASTIC_APM_SERVICE_NODE_NAME` | `SERVICE_NODE_NAME` | `None` | `"redis1"` -|============ - -The name of the given service node. This is optional and if omitted, the APM -Server will fall back on `system.container.id` if available, and -`host.name` if necessary. - -This option allows you to set the node name manually to ensure it is unique and meaningful. - -[float] -[[config-environment]] -==== `environment` - -[options="header"] -|============ -| Environment | Django/Flask | Default | Example -| `ELASTIC_APM_ENVIRONMENT` | `ENVIRONMENT` | `None` | `"production"` -|============ - -The name of the environment this service is deployed in, -e.g. "production" or "staging". - -Environments allow you to easily filter data on a global level in the APM app. -It's important to be consistent when naming environments across agents. -See {apm-app-ref}/filters.html#environment-selector[environment selector] in the APM app for more information. - -NOTE: This feature is fully supported in the APM app in Kibana versions >= 7.2. -You must use the query bar to filter for a specific environment in versions prior to 7.2. 
- -[float] -[[config-cloud-provider]] -==== `cloud_provider` - -[options="header"] -|============ -| Environment | Django/Flask | Default | Example -| `ELASTIC_APM_CLOUD_PROVIDER` | `CLOUD_PROVIDER` | `"auto"` | `"aws"` -|============ - -This config value allows you to specify which cloud provider should be assumed -for metadata collection. By default, the agent will attempt to detect the cloud -provider or, if that fails, will use trial and error to collect the metadata. - -Valid options are `"auto"`, `"aws"`, `"gcp"`, and `"azure"`. If this config value is set -to `"none"`, then no cloud metadata will be collected. - -[float] -[[config-secret-token]] -==== `secret_token` - -[options="header"] -|============ -| Environment | Django/Flask | Default | Example -| `ELASTIC_APM_SECRET_TOKEN` | `SECRET_TOKEN` | `None` | A random string -|============ - -This string is used to ensure that only your agents can send data to your APM Server. -Both the agents and the APM Server have to be configured with the same secret token. -An example to generate a secure secret token is: - -[source,bash] ----- -python -c "import secrets; print(secrets.token_urlsafe(32))" ----- - -WARNING: Secret tokens only provide any security if your APM Server uses TLS. - -[float] -[[config-api-key]] -==== `api_key` - -[options="header"] -|============ -| Environment | Django/Flask | Default | Example -| `ELASTIC_APM_API_KEY` | `API_KEY` | `None` | A base64-encoded string -|============ - -experimental::[] - -// TODO: add link to APM Server API Key docs once the docs are released - -This base64-encoded string is used to ensure that only your agents can send data to your APM Server. -The API key must be created using the {apm-guide-ref}/api-key.html[APM server command-line tool]. - -WARNING: API keys only provide any real security if your APM Server uses TLS. 
- -[float] -[[config-service-version]] -==== `service_version` -[options="header"] -|============ -| Environment | Django/Flask | Default | Example -| `ELASTIC_APM_SERVICE_VERSION` | `SERVICE_VERSION` | `None` | A string indicating the version of the deployed service -|============ - -A version string for the currently deployed version of the service. -If your deploys are not versioned, the recommended value for this field is the commit identifier of the deployed revision, e.g. the output of `git rev-parse HEAD`. - -[float] -[[config-framework-name]] -==== `framework_name` -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_FRAMEWORK_NAME` | `FRAMEWORK_NAME` | Depending on framework -|============ - -The name of the used framework. -For Django and Flask, this defaults to `django` and `flask` respectively, -otherwise, the default is `None`. - - -[float] -[[config-framework-version]] -==== `framework_version` -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_FRAMEWORK_VERSION` | `FRAMEWORK_VERSION` | Depending on framework -|============ - -The version number of the used framework. -For Django and Flask, this defaults to the used version of the framework, -otherwise, the default is `None`. - -[float] -[[config-filter-exception-types]] -==== `filter_exception_types` -[options="header"] -|============ -| Environment | Django/Flask | Default | Example -| `ELASTIC_APM_FILTER_EXCEPTION_TYPES` | `FILTER_EXCEPTION_TYPES` | `[]` | `['OperationalError', 'mymodule.SomeoneElsesProblemError']` -| multiple values separated by commas, without spaces ||| -|============ - -A list of exception types to be filtered. -Exceptions of these types will not be sent to the APM Server. 
- - -[float] -[[config-transaction-ignore-urls]] -==== `transaction_ignore_urls` - -<> - -[options="header"] -|============ -| Environment | Django/Flask | Default | Example -| `ELASTIC_APM_TRANSACTION_IGNORE_URLS` | `TRANSACTION_IGNORE_URLS` | `[]` | `['/api/ping', '/static/*']` -| multiple values separated by commas, without spaces ||| -|============ - -A list of URLs for which the agent should not capture any transaction data. - -Optionally, `*` can be used to match multiple URLs at once. - -[float] -[[config-transactions-ignore-patterns]] -==== `transactions_ignore_patterns` -[options="header"] -|============ -| Environment | Django/Flask | Default | Example -| `ELASTIC_APM_TRANSACTIONS_IGNORE_PATTERNS` | `TRANSACTIONS_IGNORE_PATTERNS` | `[]` | `['^OPTIONS ', 'myviews.Healthcheck']` -| multiple values separated by commas, without spaces ||| -|============ - -A list of regular expressions. -Transactions with a name that matches any of the configured patterns will be ignored and not sent to the APM Server. - -NOTE: as the name of the transaction can only be determined at the end of the transaction, -the agent might still cause overhead for transactions ignored through this setting. -If agent overhead is a concern, we recommend <> instead. - -[float] -[[config-server-timeout]] -==== `server_timeout` - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_SERVER_TIMEOUT` | `SERVER_TIMEOUT` | `"5s"` -|============ - -A timeout for requests to the APM Server. -The setting has to be provided in *<>*. -If a request to the APM Server takes longer than the configured timeout, -the request is cancelled and the event (exception or transaction) is discarded. -Set to `None` to disable timeouts. - -WARNING: If timeouts are disabled or set to a high value, -your app could experience memory issues if the APM Server times out. 
- - -[float] -[[config-hostname]] -==== `hostname` - -[options="header"] -|============ -| Environment | Django/Flask | Default | Example -| `ELASTIC_APM_HOSTNAME` | `HOSTNAME` | `socket.gethostname()` | `app-server01.example.com` -|============ - -The host name to use when sending error and transaction data to the APM Server. - -[float] -[[config-auto-log-stacks]] -==== `auto_log_stacks` - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_AUTO_LOG_STACKS` | `AUTO_LOG_STACKS` | `True` -| set to `"true"` / `"false"` ||| -|============ - -If set to `True` (the default), the agent will add a stack trace to each log event, -indicating where the log message has been issued. - -This setting can be overridden on an individual basis by setting the `extra`-key `stack`: - -[source,python] ----- -logger.info('something happened', extra={'stack': False}) ----- - -[float] -[[config-collect-local-variables]] -==== `collect_local_variables` - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_COLLECT_LOCAL_VARIABLES` | `COLLECT_LOCAL_VARIABLES` | `errors` -|============ - -Possible values: `errors`, `transactions`, `all`, `off` - -The Elastic APM Python agent can collect local variables for stack frames. -By default, this is only done for errors. - -NOTE: Collecting local variables has a non-trivial overhead. -Collecting local variables for transactions in production environments -can have adverse effects for the performance of your service. - -[float] -[[config-local-var-max-length]] -==== `local_var_max_length` - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_LOCAL_VAR_MAX_LENGTH` | `LOCAL_VAR_MAX_LENGTH` | `200` -|============ - -When collecting local variables, they will be converted to strings. -This setting allows you to limit the length of the resulting string. 
- - -[float] -[[config-local-list-var-max-length]] -==== `local_var_list_max_length` - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_LOCAL_VAR_LIST_MAX_LENGTH` | `LOCAL_VAR_LIST_MAX_LENGTH` | `10` -|============ - -This setting allows you to limit the length of lists in local variables. - - -[float] -[[config-local-dict-var-max-length]] -==== `local_var_dict_max_length` - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_LOCAL_VAR_DICT_MAX_LENGTH` | `LOCAL_VAR_DICT_MAX_LENGTH` | `10` -|============ - -This setting allows you to limit the length of dicts in local variables. - - -[float] -[[config-source-lines-error-app-frames]] -==== `source_lines_error_app_frames` -[float] -[[config-source-lines-error-library-frames]] -==== `source_lines_error_library_frames` -[float] -[[config-source-lines-span-app-frames]] -==== `source_lines_span_app_frames` -[float] -[[config-source-lines-span-library-frames]] -==== `source_lines_span_library_frames` - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_SOURCE_LINES_ERROR_APP_FRAMES` | `SOURCE_LINES_ERROR_APP_FRAMES` | `5` -| `ELASTIC_APM_SOURCE_LINES_ERROR_LIBRARY_FRAMES` | `SOURCE_LINES_ERROR_LIBRARY_FRAMES` | `5` -| `ELASTIC_APM_SOURCE_LINES_SPAN_APP_FRAMES` | `SOURCE_LINES_SPAN_APP_FRAMES` | `0` -| `ELASTIC_APM_SOURCE_LINES_SPAN_LIBRARY_FRAMES` | `SOURCE_LINES_SPAN_LIBRARY_FRAMES` | `0` -|============ - -By default, the APM agent collects source code snippets for errors. -This setting allows you to modify the number of lines of source code that are being collected. - -We differentiate between errors and spans, as well as library frames and app frames. - -WARNING: Especially for spans, collecting source code can have a large impact on storage use in your Elasticsearch cluster. 
- -[float] -[[config-capture-body]] -==== `capture_body` - -<> - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_CAPTURE_BODY` | `CAPTURE_BODY` | `off` -|============ - -For transactions that are HTTP requests, -the Python agent can optionally capture the request body (e.g. `POST` variables). - -Possible values: `errors`, `transactions`, `all`, `off`. - -If the request has a body and this setting is disabled, the body will be shown as `[REDACTED]`. - -For requests with a content type of `multipart/form-data`, -any uploaded files will be referenced in a special `_files` key. -It contains the name of the field and the name of the uploaded file, if provided. - -WARNING: Request bodies often contain sensitive values like passwords and credit card numbers. -If your service handles data like this, we advise to only enable this feature with care. - -[float] -[[config-capture-headers]] -==== `capture_headers` - -<> - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_CAPTURE_HEADERS` | `CAPTURE_HEADERS` | `true` -|============ - -For transactions and errors that happen due to HTTP requests, -the Python agent can optionally capture the request and response headers. - -Possible values: `true`, `false` - -WARNING: Request headers often contain sensitive values like session IDs and cookies. -See <> for more information on how to filter out sensitive data. - -[float] -[[config-transaction-max-spans]] -==== `transaction_max_spans` - -<> - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_TRANSACTION_MAX_SPANS` | `TRANSACTION_MAX_SPANS` | `500` -|============ - -This limits the amount of spans that are recorded per transaction. -This is helpful in cases where a transaction creates a very high amount of spans (e.g. thousands of SQL queries). -Setting an upper limit will prevent edge cases from overloading the agent and the APM Server. 
- -[float] -[[config-stack-trace-limit]] -==== `stack_trace_limit` - -<> - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_STACK_TRACE_LIMIT` | `STACK_TRACE_LIMIT` | `50` -|============ - -This limits the number of frames captured for each stack trace. - -Setting the limit to `0` will disable stack trace collection, -while any positive integer value will be used as the maximum number of frames to collect. -To disable the limit and always capture all frames, set the value to `-1`. - - -[float] -[[config-span-stack-trace-min-duration]] -==== `span_stack_trace_min_duration` - -<> - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_SPAN_STACK_TRACE_MIN_DURATION` | `SPAN_STACK_TRACE_MIN_DURATION` | `"5ms"` -|============ - -By default, the APM agent collects a stack trace with every recorded span -that has a duration equal to or longer than this configured threshold. While -stack traces are very helpful to find the exact place in your code from which a -span originates, collecting this stack trace does have some overhead. Tune this -threshold to ensure that you only collect stack traces for spans that -could be problematic. - -To collect traces for all spans, regardless of their length, set the value to `0`. - -To disable stack trace collection for spans completely, set the value to `-1`. - -Except for the special values `-1` and `0`, -this setting should be provided in *<>*. - - -[float] -[[config-span-frames-min-duration]] -==== `span_frames_min_duration` - -<> - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_SPAN_FRAMES_MIN_DURATION` | `SPAN_FRAMES_MIN_DURATION` | `"5ms"` -|============ - -NOTE: This config value is being deprecated. Use -<> instead. 
- - -[float] -[[config-span-compression-enabled]] -==== `span_compression_enabled` - -<> - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_SPAN_COMPRESSION_ENABLED` | `SPAN_COMPRESSION_ENABLED` | `True` -|============ - -Enable/disable span compression. - -If enabled, the agent will compress very short, repeated spans into a single span, -which is beneficial for storage and processing requirements. -Some information is lost in this process, e.g. exact durations of each compressed span. - -[float] -[[config-span-compression-exact-match-max_duration]] -==== `span_compression_exact_match_max_duration` - -<> - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_SPAN_COMPRESSION_EXACT_MATCH_MAX_DURATION` | `SPAN_COMPRESSION_EXACT_MATCH_MAX_DURATION` | `"50ms"` -|============ - -Consecutive spans that are exact match and that are under this threshold will be compressed into a single composite span. -This reduces the collection, processing, and storage overhead, and removes clutter from the UI. -The tradeoff is that the DB statements of all the compressed spans will not be collected. - -Two spans are considered exact matches if the following attributes are identical: - * span name - * span type - * span subtype - * destination resource (e.g. the Database name) - -[float] -[[config-span-compression-same-kind-max-duration]] -==== `span_compression_same_kind_max_duration` - -<> - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_SPAN_COMPRESSION_SAME_KIND_MAX_DURATION` | `SPAN_COMPRESSION_SAME_KIND_MAX_DURATION` | `"0ms"` (disabled) -|============ - -Consecutive spans to the same destination that are under this threshold will be compressed into a single composite span. -This reduces the collection, processing, and storage overhead, and removes clutter from the UI. 
-The tradeoff is that metadata such as database statements of all the compressed spans will not be collected. - -Two spans are considered to be of the same kind if the following attributes are identical: - * span type - * span subtype - * destination resource (e.g. the Database name) - -[float] -[[config-exit-span-min-duration]] -==== `exit_span_min_duration` - -<> - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_EXIT_SPAN_MIN_DURATION` | `EXIT_SPAN_MIN_DURATION` | `"0ms"` -|============ - -Exit spans are spans that represent a call to an external service, like a database. -If such calls are very short, they are usually not relevant and can be ignored. - -This feature is disabled by default. - -NOTE: if a span propagates distributed tracing IDs, it will not be ignored, even if it is shorter than the configured threshold. -This is to ensure that no broken traces are recorded. - -[float] -[[config-api-request-size]] -==== `api_request_size` - -<> - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_API_REQUEST_SIZE` | `API_REQUEST_SIZE` | `"768kb"` -|============ - -The maximum queue length of the request buffer before sending the request to the APM Server. -A lower value will increase the load on your APM Server, -while a higher value can increase the memory pressure of your app. -A higher value also impacts the time until data is indexed and searchable in Elasticsearch. - -This setting is useful to limit memory consumption if you experience a sudden spike of traffic. -It has to be provided in *<>*. - -NOTE: Due to internal buffering of gzip, the actual request size can be a few kilobytes larger than the given limit. -By default, the APM Server limits request payload size to `1 MByte`. 
- -[float] -[[config-api-request-time]] -==== `api_request_time` - -<> - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_API_REQUEST_TIME` | `API_REQUEST_TIME` | `"10s"` -|============ - -The maximum queue time of the request buffer before sending the request to the APM Server. -A lower value will increase the load on your APM Server, -while a higher value can increase the memory pressure of your app. -A higher value also impacts the time until data is indexed and searchable in Elasticsearch. - -This setting is useful to limit memory consumption if you experience a sudden spike of traffic. -It has to be provided in *<>*. - -NOTE: The actual time will vary between 90-110% of the given value, -to avoid stampedes of instances that start at the same time. - -[float] -[[config-processors]] -==== `processors` - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_PROCESSORS` | `PROCESSORS` | `['elasticapm.processors.sanitize_stacktrace_locals', - 'elasticapm.processors.sanitize_http_request_cookies', - 'elasticapm.processors.sanitize_http_headers', - 'elasticapm.processors.sanitize_http_wsgi_env', - 'elasticapm.processors.sanitize_http_request_body']` -|============ - -A list of processors to process transactions and errors. -For more information, see <>. - -WARNING: We recommend always including the default set of validators if you customize this setting. - -[float] -[[config-sanitize-field-names]] -==== `sanitize_field_names` - -<> - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_SANITIZE_FIELD_NAMES` | `SANITIZE_FIELD_NAMES` | `["password", - "passwd", - "pwd", - "secret", - "*key", - "*token*", - "*session*", - "*credit*", - "*card*", - "*auth*", - "*principal*", - "set-cookie"]` -|============ - -A list of glob-matched field names to match and mask when using processors. -For more information, see <>. 
- -WARNING: We recommend always including the default set of field name matches -if you customize this setting. - - -[float] -[[config-transaction-sample-rate]] -==== `transaction_sample_rate` - -<> - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_TRANSACTION_SAMPLE_RATE` | `TRANSACTION_SAMPLE_RATE` | `1.0` -|============ - -By default, the agent samples every transaction (e.g. request to your service). -To reduce overhead and storage requirements, set the sample rate to a value between `0.0` and `1.0`. -We still record overall time and the result for unsampled transactions, but no context information, labels, or spans. - -NOTE: This setting will be automatically rounded to 4 decimals of precision. - -[float] -[[config-include-paths]] -==== `include_paths` - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_INCLUDE_PATHS` | `INCLUDE_PATHS` | `[]` -| multiple values separated by commas, without spaces ||| -|============ - -A set of paths, optionally using shell globs -(see https://docs.python.org/3/library/fnmatch.html[`fnmatch`] for a description of the syntax). -These are matched against the absolute filename of every frame, and if a pattern matches, the frame is considered -to be an "in-app frame". - -`include_paths` *takes precedence* over `exclude_paths`. - -[float] -[[config-exclude-paths]] -==== `exclude_paths` - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_EXCLUDE_PATHS` | `EXCLUDE_PATHS` | Varies on Python version and implementation -| multiple values separated by commas, without spaces ||| -|============ - -A set of paths, optionally using shell globs -(see https://docs.python.org/3/library/fnmatch.html[`fnmatch`] for a description of the syntax). -These are matched against the absolute filename of every frame, and if a pattern matches, the frame is considered -to be a "library frame". 
- -`include_paths` *takes precedence* over `exclude_paths`. - -The default value varies based on your Python version and implementation, e.g.: - - * PyPy3: `['\*/lib-python/3/*', '\*/site-packages/*']` - * CPython 2.7: `['\*/lib/python2.7/*', '\*/lib64/python2.7/*']` - -[float] -[[config-debug]] -==== `debug` - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_DEBUG` | `DEBUG` | `False` -|============ - -If your app is in debug mode (e.g. in Django with `settings.DEBUG = True` or in Flask with `app.debug = True`), -the agent won't send any data to the APM Server. You can override it by changing this setting to `True`. - - -[float] -[[config-disable-send]] -==== `disable_send` - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_DISABLE_SEND` | `DISABLE_SEND` | `False` -|============ - -If set to `True`, the agent won't send any events to the APM Server, independent of any debug state. - - -[float] -[[config-instrument]] -==== `instrument` - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_INSTRUMENT` | `INSTRUMENT` | `True` -|============ - -If set to `False`, the agent won't instrument any code. -This disables most of the tracing functionality, but can be useful to debug possible instrumentation issues. - - -[float] -[[config-verify-server-cert]] -==== `verify_server_cert` - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_VERIFY_SERVER_CERT` | `VERIFY_SERVER_CERT` | `True` -|============ - -By default, the agent verifies the SSL certificate if an HTTPS connection to the APM Server is used. -Verification can be disabled by changing this setting to `False`. -This setting is ignored when <> is set. 
- -[float] -[[config-server-cert]] -==== `server_cert` - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_SERVER_CERT` | `SERVER_CERT` | `None` -|============ - -If you have configured your APM Server with a self-signed TLS certificate, or you -just wish to pin the server certificate, you can specify the path to the PEM-encoded -certificate via the `ELASTIC_APM_SERVER_CERT` configuration. - -NOTE: If this option is set, the agent only verifies that the certificate provided by the APM Server is -identical to the one configured here. Validity of the certificate is not checked. - -[float] -[[config-server-ca-cert-file]] -==== `server_ca_cert_file` - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_SERVER_CA_CERT_FILE` | `SERVER_CA_CERT_FILE` | `None` -|============ - -By default, the agent will validate the TLS/SSL certificate of the APM Server using the well-known CAs curated by Mozilla, -and provided by the https://pypi.org/project/certifi/[`certifi`] package. - -You can set this option to the path of a file containing a CA certificate that will be used instead. - -Specifying this option is required when using self-signed certificates, unless server certificate validation is disabled. - -[float] -[[config-use-certifi]] -==== `use_certifi` - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_USE_CERTIFI` | `USE_CERTIFI` | `True` -|============ - -By default, the Python Agent uses the https://pypi.org/project/certifi/[`certifi`] certificate store. -To use Python's default mechanism for finding certificates, set this option to `False`. - -[float] -[[config-metrics_interval]] -==== `metrics_interval` - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_METRICS_INTERVAL` | `METRICS_INTERVAL` | `30s` -|============ - - -The interval in which the agent collects metrics. 
A shorter interval increases the granularity of metrics, -but also increases the overhead of the agent, as well as storage requirements. - -It has to be provided in *<>*. - -[float] -[[config-disable_metrics]] -==== `disable_metrics` - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_DISABLE_METRICS` | `DISABLE_METRICS` | `None` -|============ - - -A comma-separated list of dotted metrics names that should not be sent to the APM Server. -You can use `*` to match multiple metrics; for example, to disable all CPU-related metrics, -as well as the "total system memory" metric, set `disable_metrics` to: - -.... -"*.cpu.*,system.memory.total" -.... - -NOTE: This setting only disables the *sending* of the given metrics, not collection. - -[float] -[[config-breakdown_metrics]] -==== `breakdown_metrics` - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_BREAKDOWN_METRICS` | `BREAKDOWN_METRICS` | `True` -|============ - -Enable or disable the tracking and collection of breakdown metrics. -Setting this to `False` disables the tracking of breakdown metrics, which can reduce the overhead of the agent. - -NOTE: This feature requires APM Server and Kibana >= 7.3. - -[float] -[[config-prometheus_metrics]] -==== `prometheus_metrics` (Beta) - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_PROMETHEUS_METRICS` | `PROMETHEUS_METRICS` | `False` -|============ - -Enable/disable the tracking and collection of metrics from `prometheus_client`. - -See <> for more information. - -NOTE: This feature is currently in beta status. - -[float] -[[config-prometheus_metrics_prefix]] -==== `prometheus_metrics_prefix` (Beta) - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_PROMETHEUS_METRICS_PREFIX` | `PROMETHEUS_METRICS_PREFIX` | `prometheus.metrics.` -|============ - -A prefix to prepend to Prometheus metrics names. 
- -See <> for more information. - -NOTE: This feature is currently in beta status. - -[float] -[[config-metrics_sets]] -==== `metrics_sets` - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_METRICS_SETS` | `METRICS_SETS` | ["elasticapm.metrics.sets.cpu.CPUMetricSet"] -|============ - -List of import paths for the MetricSets that should be used to collect metrics. - -See <> for more information. - -[float] -[[config-central_config]] -==== `central_config` - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_CENTRAL_CONFIG` | `CENTRAL_CONFIG` | `True` -|============ - -When enabled, the agent will make periodic requests to the APM Server to fetch updated configuration. - -See <> for more information. - -NOTE: This feature requires APM Server and Kibana >= 7.3. - -[float] -[[config-global_labels]] -==== `global_labels` - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_GLOBAL_LABELS` | `GLOBAL_LABELS` | `None` -|============ - -Labels added to all events, with the format `key=value[,key=value[,...]]`. -Any labels set by application via the API will override global labels with the same keys. - -NOTE: This feature requires APM Server >= 7.2. - -[float] -[[config-generic-disable-log-record-factory]] -==== `disable_log_record_factory` - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_DISABLE_LOG_RECORD_FACTORY` | `DISABLE_LOG_RECORD_FACTORY` | `False` -|============ - -By default in python 3, the agent installs a <> that -automatically adds tracing fields to your log records. Disable this -behavior by setting this to `True`. 
- -[float] -[[config-use-elastic-traceparent-header]] -==== `use_elastic_traceparent_header` - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_USE_ELASTIC_TRACEPARENT_HEADER` | `USE_ELASTIC_TRACEPARENT_HEADER` | `True` -|============ - -To enable {apm-guide-ref}/apm-distributed-tracing.html[distributed tracing], -the agent sets a number of HTTP headers to outgoing requests made with <>. -These headers (`traceparent` and `tracestate`) are defined in the https://www.w3.org/TR/trace-context-1/[W3C Trace Context] specification. - -Additionally, when this setting is set to `True`, the agent will set `elasticapm-traceparent` for backwards compatibility. - -[float] -[[config-trace-continuation-strategy]] -==== `trace_continuation_strategy` - -<> - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_TRACE_CONTINUATION_STRATEGY` | `TRACE_CONTINUATION_STRATEGY` | `continue` -|============ - -This option allows some control on how the APM agent handles W3C trace-context headers on incoming requests. -By default, the `traceparent` and `tracestate` headers are used per W3C spec for distributed tracing. -However, in certain cases it can be helpful to *not* use the incoming `traceparent` header. -Some example use cases: - -- An Elastic-monitored service is receiving requests with `traceparent` headers from *unmonitored* services. -- An Elastic-monitored service is publicly exposed, and does not want tracing data (trace-ids, sampling decisions) to possibly be spoofed by user requests. - -Valid values are: - -- `'continue'`: The default behavior. An incoming `traceparent` value is used to continue the trace and determine the sampling decision. -- `'restart'`: Always ignores the `traceparent` header of incoming requests. - A new trace-id will be generated and the sampling decision will be made based on <>. - A *span link* will be made to the incoming traceparent. 
-- `'restart_external'`: If an incoming request includes the `es` vendor flag in `tracestate`, then any 'traceparent' will be considered internal and will be handled as described for `'continue'` above. - Otherwise, any `'traceparent'` is considered external and will be handled as described for `'restart'` above. - -Starting with Elastic Observability 8.2, span links will be visible in trace -views. - -[float] -[[config-use-elastic-excepthook]] -==== `use_elastic_excepthook` - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_USE_ELASTIC_EXCEPTHOOK` | `USE_ELASTIC_EXCEPTHOOK` | `False` -|============ - -If set to `True`, the agent will intercept the default `sys.excepthook`, which -allows the agent to collect all uncaught exceptions. - -[float] -[[config-include-process-args]] -==== `include_process_args` - -[options="header"] -|============ -| Environment | Django/Flask | Default -| `ELASTIC_APM_INCLUDE_PROCESS_ARGS` | `INCLUDE_PROCESS_ARGS` | `False` -|============ - -Whether each transaction should have the process arguments attached. Disabled by default to save disk space. - -[float] -[[config-django-specific]] -=== Django-specific configuration - -[float] -[[config-django-transaction-name-from-route]] -==== `django_transaction_name_from_route` - -[options="header"] -|============ -| Environment | Django | Default -| `ELASTIC_APM_DJANGO_TRANSACTION_NAME_FROM_ROUTE` | `DJANGO_TRANSACTION_NAME_FROM_ROUTE` | `False` -|============ - - -By default, we use the function or class name of the view as the transaction name. -Starting with Django 2.2, Django makes the route (e.g. `users//`) available on the `request.resolver_match` object. -If you want to use the route instead of the view name as the transaction name, set this config option to `true`. - -NOTE: in versions previous to Django 2.2, changing this setting will have no effect. 
- -[float] -[[config-django-autoinsert-middleware]] -==== `django_autoinsert_middleware` - -[options="header"] -|============ -| Environment | Django | Default -| `ELASTIC_APM_DJANGO_AUTOINSERT_MIDDLEWARE` | `DJANGO_AUTOINSERT_MIDDLEWARE` | `True` -|============ - -To trace Django requests, the agent uses a middleware, `elasticapm.contrib.django.middleware.TracingMiddleware`. -By default, this middleware is inserted automatically as the first item in `settings.MIDDLEWARES`. -To disable the automatic insertion of the middleware, change this setting to `False`. - - -[float] -[[config-generic-environment]] -=== Generic Environment variables - -Some environment variables that are not specific to the APM agent can be used to configure the agent. - -[float] -[[config-generic-http-proxy]] -==== `HTTP_PROXY` and `HTTPS_PROXY` - -By using `HTTP_PROXY` and `HTTPS_PROXY`, the agent can be instructed to use a proxy to connect to the APM Server. -If both are set, `HTTPS_PROXY` takes precedence. - -NOTE: The environment variables are case-insensitive. - -[float] -[[config-generic-no-proxy]] -==== `NO_PROXY` - -To instruct the agent to *not* use a proxy, you can use the `NO_PROXY` environment variable. -You can either set it to a comma-separated list of hosts for which no proxy should be used (e.g. `localhost,example.com`) -or use `*` to match any host. - -This is useful if `HTTP_PROXY` / `HTTPS_PROXY` is set for other reasons than agent / APM Server communication. - - -[float] -[[config-ssl-cert-file]] -==== `SSL_CERT_FILE` and `SSL_CERT_DIR` - -To tell the agent to use a different SSL certificate, you can use these environment variables. -See also https://www.openssl.org/docs/manmaster/man7/openssl-env.html#SSL_CERT_DIR-SSL_CERT_FILE[OpenSSL docs]. - -Please note that these variables may apply to other SSL/TLS communication in your service, -not just related to the APM agent. - -NOTE: These environment variables only take effect if <> is set to `False`. 
- -[float] -[[config-formats]] -=== Configuration formats - -Some options require a unit, either duration or size. -These need to be provided in a specific format. - -[float] -[[config-format-duration]] -==== Duration format - -The _duration_ format is used for options like timeouts. -The unit is provided as a suffix directly after the number–without any separation by whitespace. - -*Example*: `5ms` - -*Supported units* - - * `us` (microseconds) - * `ms` (milliseconds) - * `s` (seconds) - * `m` (minutes) - -[float] -[[config-format-size]] -==== Size format - -The _size_ format is used for options like maximum buffer sizes. -The unit is provided as suffix directly after the number, without and separation by whitespace. - - -*Example*: `10kb` - -*Supported units*: - - * `b` (bytes) - * `kb` (kilobytes) - * `mb` (megabytes) - * `gb` (gigabytes) - -NOTE: We use the power-of-two sizing convention, e.g. `1 kilobyte == 1024 bytes` diff --git a/docs/custom-instrumentation.asciidoc b/docs/custom-instrumentation.asciidoc deleted file mode 100644 index 1db067f72..000000000 --- a/docs/custom-instrumentation.asciidoc +++ /dev/null @@ -1,143 +0,0 @@ -[[instrumenting-custom-code]] -=== Instrumenting custom code - -[float] -[[instrumenting-custom-code-spans]] -==== Creating Additional Spans in a Transaction - -Elastic APM instruments a variety of libraries out of the box, but sometimes you -need to know how long a specific function took or how often it gets -called. - -Assuming you're using one of our <>, you can -apply the `@elasticapm.capture_span()` decorator to achieve exactly that. If -you're not using a supported framework, see -<>. - -`elasticapm.capture_span` can be used either as a decorator or as a context -manager. 
The following example uses it both ways: - -[source,python] ----- -import elasticapm - -@elasticapm.capture_span() -def coffee_maker(strength): - fetch_water() - - with elasticapm.capture_span('near-to-machine'): - insert_filter() - for i in range(strength): - pour_coffee() - - start_drip() - - fresh_pots() ----- - -Similarly, you can use `elasticapm.async_capture_span` for instrumenting `async` workloads: - -[source,python] ----- -import elasticapm - -@elasticapm.async_capture_span() -async def coffee_maker(strength): - await fetch_water() - - async with elasticapm.async_capture_span('near-to-machine'): - await insert_filter() - async for i in range(strength): - await pour_coffee() - - start_drip() - - fresh_pots() ----- - -NOTE: `asyncio` support is only available in Python 3.7+. - -See <> for more information on `capture_span`. - -[float] -[[instrumenting-custom-code-transactions]] -==== Creating New Transactions - -It's important to note that `elasticapm.capture_span` only works if there is -an existing transaction. If you're not using one of our <>, you need to create a `Client` object and begin and end the -transactions yourself. You can even utilize the agent's -<>! - -To collect the spans generated by the supported libraries, you need -to invoke `elasticapm.instrument()` (just once, at the initialization stage of -your application) and create at least one transaction. It is up to you to -determine what you consider a transaction within your application -- it can -be the whole execution of the script or a part of it. - -The example below will consider the whole execution as a single transaction -with two HTTP request spans in it. The config for `elasticapm.Client` can be -passed in programmatically, and it will also utilize any config environment -variables available to it automatically. 
- -[source,python] ----- -import requests -import time -import elasticapm - -def main(): - sess = requests.Session() - for url in [ 'https://www.elastic.co', 'https://benchmarks.elastic.co' ]: - resp = sess.get(url) - time.sleep(1) - -if __name__ == '__main__': - client = elasticapm.Client(service_name="foo", server_url="https://example.com:8200") - elasticapm.instrument() # Only call this once, as early as possible. - client.begin_transaction(transaction_type="script") - main() - client.end_transaction(name=__name__, result="success") ----- - -Note that you don't need to do anything to send the data -- the `Client` object -will handle that before the script exits. Additionally, the `Client` object should -be treated as a singleton -- you should only create one instance and store/pass -around that instance for all transaction handling. - -[float] -[[instrumenting-custom-code-distributed-tracing]] -==== Distributed Tracing - -When instrumenting custom code across multiple services, you should propagate -the TraceParent where possible. This allows Elastic APM to bundle the various -transactions into a single distributed trace. The Python Agent will -automatically add TraceParent information to the headers of outgoing HTTP -requests, which can then be used on the receiving end to add that TraceParent -information to new manually-created transactions. - -Additionally, the Python Agent provides utilities for propagating the -TraceParent in string format. 
- -[source,python] ----- -import elasticapm - -client = elasticapm.Client(service_name="foo", server_url="https://example.com:8200") - -# Retrieve the current TraceParent as a string, requires active transaction -traceparent_string = elasticapm.get_trace_parent_header() - -# Create a TraceParent object from a string and use it for a new transaction -parent = elasticapm.trace_parent_from_string(traceparent_string) -client.begin_transaction(transaction_type="script", trace_parent=parent) -# Do some work -client.end_transaction(name=__name__, result="success") - -# Create a TraceParent object from a dictionary of headers, provided -# automatically by the sending service if it is using an Elastic APM Agent. -parent = elasticapm.trace_parent_from_headers(headers_dict) -client.begin_transaction(transaction_type="script", trace_parent=parent) -# Do some work -client.end_transaction(name=__name__, result="success") ----- diff --git a/docs/django.asciidoc b/docs/django.asciidoc deleted file mode 100644 index 1aa8396f6..000000000 --- a/docs/django.asciidoc +++ /dev/null @@ -1,375 +0,0 @@ -[[django-support]] -=== Django support - -Getting Elastic APM set up for your Django project is easy, and there are various ways you can tweak it to fit to your needs. - -[float] -[[django-installation]] -==== Installation - -Install the Elastic APM agent using pip: - -[source,bash] ----- -$ pip install elastic-apm ----- - -or add it to your project's `requirements.txt` file. - -NOTE: For apm-server 6.2+, make sure you use version 2.0 or higher of `elastic-apm`. - - -NOTE: If you use Django with uwsgi, make sure to -http://uwsgi-docs.readthedocs.org/en/latest/Options.html#enable-threads[enable -threads]. - -[float] -[[django-setup]] -==== Setup - -Set up the Elastic APM agent in Django with these two steps: - -1. Add `elasticapm.contrib.django` to `INSTALLED_APPS` in your settings: - -[source,python] ----- -INSTALLED_APPS = ( - # ... - 'elasticapm.contrib.django', -) ----- - -1. 
Choose a service name, and set the secret token if needed. - -[source,python] ----- -ELASTIC_APM = { - 'SERVICE_NAME': '', - 'SECRET_TOKEN': '', -} ----- - -or as environment variables: - -[source,shell] ----- -ELASTIC_APM_SERVICE_NAME= -ELASTIC_APM_SECRET_TOKEN= ----- - -You now have basic error logging set up, and everything resulting in a 500 HTTP status code will be reported to the APM Server. - -You can find a list of all available settings in the <> page. - -[NOTE] -==== -The agent only captures and sends data if you have `DEBUG = False` in your settings. -To force the agent to capture data in Django debug mode, set the <> configuration option, e.g.: - -[source,python] ----- -ELASTIC_APM = { - 'SERVICE_NAME': '', - 'DEBUG': True, -} ----- -==== - -[float] -[[django-performance-metrics]] -==== Performance metrics - -In order to collect performance metrics, -the agent automatically inserts a middleware at the top of your middleware list -(`settings.MIDDLEWARE` in current versions of Django, `settings.MIDDLEWARE_CLASSES` in some older versions). -To disable the automatic insertion of the middleware, -see <>. - -NOTE: For automatic insertion to work, -your list of middlewares (`settings.MIDDLEWARE` or `settings.MIDDLEWARE_CLASSES`) must be of type `list` or `tuple`. - -In addition to broad request metrics (what will appear in the APM app as transactions), -the agent also collects fine grained metrics on template rendering, -database queries, HTTP requests, etc. -You can find more information on what we instrument in the <> section. - -[float] -[[django-instrumenting-custom-python-code]] -===== Instrumenting custom Python code - -To gain further insights into the performance of your code, please see -<>. - -[float] -[[django-ignoring-specific-views]] -===== Ignoring specific views - -You can use the `TRANSACTIONS_IGNORE_PATTERNS` configuration option to ignore specific views. 
-The list given should be a list of regular expressions which are matched against the transaction name as seen in the Elastic APM user interface: - -[source,python] ----- -ELASTIC_APM['TRANSACTIONS_IGNORE_PATTERNS'] = ['^OPTIONS ', 'views.api.v2'] ----- - -This example ignores any requests using the `OPTIONS` method and any requests containing `views.api.v2`. - -[float] -[[django-transaction-name-route]] -===== Using the route as transaction name - -By default, we use the function or class name of the view as the transaction name. -Starting with Django 2.2, Django makes the route (e.g. `users//`) available on the `request.resolver_match` object. -If you want to use the route instead of the view name as the transaction name, you can set the <> config option to `true`. - -[source,python] ----- -ELASTIC_APM['DJANGO_TRANSACTION_NAME_FROM_ROUTE'] = True ----- - -NOTE: in versions previous to Django 2.2, changing this setting will have no effect. - -[float] -[[django-integrating-with-the-rum-agent]] -===== Integrating with the RUM Agent - -To correlate performance measurement in the browser with measurements in your Django app, -you can help the RUM (Real User Monitoring) agent by configuring it with the Trace ID and Span ID of the backend request. -We provide a handy template context processor which adds all the necessary bits into the context of your templates. - -To enable this feature, first add the `rum_tracing` context processor to your `TEMPLATES` setting. -You most likely already have a list of `context_processors`, in which case you can simply append ours to the list. - -[source,python] ----- -TEMPLATES = [ - { - 'BACKEND': 'django.template.backends.django.DjangoTemplates', - 'OPTIONS': { - 'context_processors': [ - # ... 
- 'elasticapm.contrib.django.context_processors.rum_tracing', - ], - }, - }, -] - ----- - -Then, update the call to initialize the RUM agent (which probably happens in your base template) like this: - -[source,javascript] ----- -elasticApm.init({ - serviceName: "my-frontend-service", - pageLoadTraceId: "{{ apm.trace_id }}", - pageLoadSpanId: "{{ apm.span_id }}", - pageLoadSampled: {{ apm.is_sampled_js }} -}) - ----- - -See the {apm-rum-ref}[JavaScript RUM agent documentation] for more information. - -[float] -[[django-enabling-and-disabling-the-agent]] -==== Enabling and disabling the agent - -The easiest way to disable the agent is to set Django’s `DEBUG` option to `True` in your development configuration. -No errors or metrics will be logged to Elastic APM. - -However, if during debugging you would like to force logging of errors to Elastic APM, then you can set `DEBUG` to `True` inside of the Elastic APM -configuration dictionary, like this: - -[source,python] ----- -ELASTIC_APM = { - # ... - 'DEBUG': True, -} ----- - -[float] -[[django-logging]] -==== Integrating with Python logging - -To easily send Python `logging` messages as "error" objects to Elasticsearch, -we provide a `LoggingHandler` which you can use in your logging setup. -The log messages will be enriched with a stack trace, data from the request, and more. - -NOTE: the intended use case for this handler is to send high priority log messages (e.g. log messages with level `ERROR`) -to Elasticsearch. For normal log shipping, we recommend using {filebeat-ref}[filebeat]. - -If you are new to how the `logging` module works together with Django, read more -https://docs.djangoproject.com/en/2.1/topics/logging/[in the Django documentation]. 
- -An example of how your `LOGGING` setting could look: - -[source,python] ----- -LOGGING = { - 'version': 1, - 'disable_existing_loggers': True, - 'formatters': { - 'verbose': { - 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s' - }, - }, - 'handlers': { - 'elasticapm': { - 'level': 'WARNING', - 'class': 'elasticapm.contrib.django.handlers.LoggingHandler', - }, - 'console': { - 'level': 'DEBUG', - 'class': 'logging.StreamHandler', - 'formatter': 'verbose' - } - }, - 'loggers': { - 'django.db.backends': { - 'level': 'ERROR', - 'handlers': ['console'], - 'propagate': False, - }, - 'mysite': { - 'level': 'WARNING', - 'handlers': ['elasticapm'], - 'propagate': False, - }, - # Log errors from the Elastic APM module to the console (recommended) - 'elasticapm.errors': { - 'level': 'ERROR', - 'handlers': ['console'], - 'propagate': False, - }, - }, -} ----- - -With this configuration, logging can be done like this in any module in the `myapp` django app: - -You can now use the logger in any module in the `myapp` Django app, for instance `myapp/views.py`: - -[source,python] ----- -import logging -logger = logging.getLogger('mysite') - -try: - instance = MyModel.objects.get(pk=42) -except MyModel.DoesNotExist: - logger.error( - 'Could not find instance, doing something else', - exc_info=True - ) ----- - -Note that `exc_info=True` adds the exception information to the data that gets sent to Elastic APM. -Without it, only the message is sent. 
- -[float] -[[django-extra-data]] -===== Extra data - -If you want to send more data than what you get with the agent by default, logging can be done like so: - -[source,python] ----- -import logging -logger = logging.getLogger('mysite') - -try: - instance = MyModel.objects.get(pk=42) -except MyModel.DoesNotExist: - logger.error( - 'There was some crazy error', - exc_info=True, - extra={ - 'datetime': str(datetime.now()), - } - ) ----- - -[float] -[[django-celery-integration]] -==== Celery integration - -For a general guide on how to set up Django with Celery, head over to -Celery's http://celery.readthedocs.org/en/latest/django/first-steps-with-django.html#django-first-steps[Django -documentation]. - -Elastic APM will automatically log errors from your celery tasks, record performance data and keep the trace.id -when the task is launched from an already started Elastic transaction. - -[float] -[[django-logging-http-404-not-found-errors]] -==== Logging "HTTP 404 Not Found" errors - -By default, Elastic APM does not log HTTP 404 errors. If you wish to log -these errors, add -`'elasticapm.contrib.django.middleware.Catch404Middleware'` to -`MIDDLEWARE` in your settings: - -[source,python] ----- -MIDDLEWARE = ( - # ... - 'elasticapm.contrib.django.middleware.Catch404Middleware', - # ... -) ----- - -Note that this middleware respects Django's -https://docs.djangoproject.com/en/1.11/ref/settings/#ignorable-404-urls[`IGNORABLE_404_URLS`] -setting. - -[float] -[[django-disable-agent-during-tests]] -==== Disable the agent during tests - -To prevent the agent from sending any data to the APM Server during tests, set the `ELASTIC_APM_DISABLE_SEND` environment variable to `true`, e.g.: - -[source,python] ----- -ELASTIC_APM_DISABLE_SEND=true python manage.py test ----- - -[float] -[[django-troubleshooting]] -==== Troubleshooting - -Elastic APM comes with a Django command that helps troubleshooting your setup. 
To check your configuration, run - -[source,bash] ----- -python manage.py elasticapm check ----- - -To send a test exception using the current settings, run - -[source,bash] ----- -python manage.py elasticapm test ----- - -If the command succeeds in sending a test exception, it will print a success message: - -[source,bash] ----- -python manage.py elasticapm test - -Trying to send a test error using these settings: - -SERVICE_NAME: -SECRET_TOKEN: -SERVER: http://127.0.0.1:8200 - -Success! We tracked the error successfully! You should be able to see it in a few seconds. ----- - -[float] -[[supported-django-and-python-versions]] -==== Supported Django and Python versions - -A list of supported <> and <> versions can be found on our <> page. diff --git a/docs/docset.yml b/docs/docset.yml new file mode 100644 index 000000000..c11f471c4 --- /dev/null +++ b/docs/docset.yml @@ -0,0 +1,19 @@ +project: 'APM Python agent docs' +products: + - id: apm-agent +cross_links: + - apm-agent-rum-js + - apm-aws-lambda + - beats + - docs-content + - ecs + - ecs-logging + - ecs-logging-python + - elasticsearch + - logstash-docs-md +toc: + - toc: reference + - toc: release-notes +subs: + ecloud: "Elastic Cloud" + apm-lambda-ext: "Elastic APM AWS Lambda extension" diff --git a/docs/flask.asciidoc b/docs/flask.asciidoc deleted file mode 100644 index 9fb8f7e3e..000000000 --- a/docs/flask.asciidoc +++ /dev/null @@ -1,245 +0,0 @@ -[[flask-support]] -=== Flask support - -Getting Elastic APM set up for your Flask project is easy, -and there are various ways you can tweak it to fit to your needs. - -[float] -[[flask-installation]] -==== Installation - -Install the Elastic APM agent using pip: - -[source,bash] ----- -$ pip install "elastic-apm[flask]" ----- - -or add `elastic-apm[flask]` to your project's `requirements.txt` file. - -NOTE: For apm-server 6.2+, make sure you use version 2.0 or higher of `elastic-apm`. 
- -NOTE: If you use Flask with uwsgi, make sure to -http://uwsgi-docs.readthedocs.org/en/latest/Options.html#enable-threads[enable -threads]. - -NOTE: If you see an error log that mentions `psutil not found`, you can install -`psutil` using `pip install psutil`, or add `psutil` to your `requirements.txt` -file. - -[float] -[[flask-setup]] -==== Setup - -To set up the agent, you need to initialize it with appropriate settings. - -The settings are configured either via environment variables, -the application's settings, or as initialization arguments. - -You can find a list of all available settings in the <> page. - -To initialize the agent for your application using environment variables: - -[source,python] ----- -from elasticapm.contrib.flask import ElasticAPM - -app = Flask(__name__) - -apm = ElasticAPM(app) ----- - -To configure the agent using `ELASTIC_APM` in your application's settings: - -[source,python] ----- -from elasticapm.contrib.flask import ElasticAPM - -app.config['ELASTIC_APM'] = { - 'SERVICE_NAME': '', - 'SECRET_TOKEN': '', -} -apm = ElasticAPM(app) ----- - -The final option is to initialize the agent with the settings as arguments: - -[source,python] ----- -from elasticapm.contrib.flask import ElasticAPM - -apm = ElasticAPM(app, service_name='', secret_token='') ----- - -[float] -[[flask-debug-mode]] -===== Debug mode - -NOTE: Please note that errors and transactions will only be sent to the APM Server if your app is *not* in -http://flask.pocoo.org/docs/2.3.x/quickstart/#debug-mode[Flask debug mode]. - -To force the agent to send data while the app is in debug mode, -set the value of `DEBUG` in the `ELASTIC_APM` dictionary to `True`: - -[source,python] ----- -app.config['ELASTIC_APM'] = { - 'SERVICE_NAME': '', - 'SECRET_TOKEN': '', - 'DEBUG': True -} ----- - -[float] -[[flask-building-applications-on-the-fly]] -===== Building applications on the fly? 
- -You can use the agent's `init_app` hook for adding the application on the fly: - -[source,python] ---- -from elasticapm.contrib.flask import ElasticAPM -apm = ElasticAPM() - -def create_app(): - app = Flask(__name__) - apm.init_app(app, service_name='', secret_token='') - return app ----- - -[float] -[[flask-usage]] -==== Usage - -Once you have configured the agent, -it will automatically track transactions and capture uncaught exceptions within Flask. -If you want to send additional events, -a couple of shortcuts are provided on the ElasticAPM Flask middleware object -by raising an exception or logging a generic message. - -Capture an arbitrary exception by calling `capture_exception`: - -[source,python] ---- -try: - 1 / 0 -except ZeroDivisionError: - apm.capture_exception() ----- - -Log a generic message with `capture_message`: - -[source,python] ---- -apm.capture_message('hello, world!') ----- - -[float] -[[flask-logging]] -==== Shipping Logs to Elasticsearch - -This feature has been deprecated and will be removed in a future version. - -Please see our <> documentation for other supported ways to ship -logs to Elasticsearch. - -Note that you can always send exceptions and messages to the APM Server with -<> and -<>. - -[source,python] ---- -from elasticapm import get_client - -@app.route('/') -def bar(): - try: - 1 / 0 - except ZeroDivisionError: - get_client().capture_exception() ----- - -[float] -[[flask-extra-data]] -===== Extra data - -In addition to what the agents log by default, you can send extra information: - -[source,python] ---- -@app.route('/') -def bar(): - try: - 1 / 0 - except ZeroDivisionError: - app.logger.error('Math is hard', - exc_info=True, - extra={ - 'good_at_math': False, - } - ) ----- - -[float] -[[flask-celery-tasks]] -===== Celery tasks - -The Elastic APM agent will automatically send errors and performance data from your Celery tasks to the APM Server.
- -[float] -[[flask-performance-metrics]] -==== Performance metrics - -If you've followed the instructions above, the agent has already hooked -into the right signals and should be reporting performance metrics. - -[float] -[[flask-ignoring-specific-views]] -===== Ignoring specific routes - -You can use the <> configuration option to ignore specific routes. -The list given should be a list of regular expressions which are matched against the transaction name: - -[source,python] ----- -app.config['ELASTIC_APM'] = { - ... - 'TRANSACTIONS_IGNORE_PATTERNS': ['^OPTIONS ', '/api/'] - ... -} ----- - -This would ignore any requests using the `OPTIONS` method -and any requests containing `/api/`. - - -[float] -[[flask-integrating-with-the-rum-agent]] -===== Integrating with the RUM Agent - -To correlate performance measurement in the browser with measurements in your Flask app, -you can help the RUM (Real User Monitoring) agent by configuring it with the Trace ID and Span ID of the backend request. -We provide a handy template context processor which adds all the necessary bits into the context of your templates. - -The context processor is installed automatically when you initialize `ElasticAPM`. -All that is left to do is to update the call to initialize the RUM agent (which probably happens in your base template) like this: - -[source,javascript] ----- -elasticApm.init({ - serviceName: "my-frontend-service", - pageLoadTraceId: "{{ apm["trace_id"] }}", - pageLoadSpanId: "{{ apm["span_id"]() }}", - pageLoadSampled: {{ apm["is_sampled_js"] }} -}) - ----- - -See the {apm-rum-ref}[JavaScript RUM agent documentation] for more information. - -[float] -[[supported-flask-and-python-versions]] -==== Supported Flask and Python versions - -A list of supported <> and <> versions can be found on our <> page. 
diff --git a/docs/getting-started.asciidoc b/docs/getting-started.asciidoc deleted file mode 100644 index ec8a88bf8..000000000 --- a/docs/getting-started.asciidoc +++ /dev/null @@ -1,32 +0,0 @@ -[[getting-started]] -== Introduction - -The Elastic APM Python agent sends performance metrics and error logs to the APM Server. -It has built-in support for Django and Flask performance metrics and error logging, as well as generic support of other WSGI frameworks for error logging. - -[float] -[[how-it-works]] -=== How does the Agent work? - -The Python Agent instruments your application to collect APM events in a few different ways: - -To collect data about incoming requests and background tasks, the Agent integrates with <> to make use of hooks and signals provided by the framework. -These framework integrations require limited code changes in your application. - -To collect data from database drivers, HTTP libraries etc., -we instrument certain functions and methods in these libraries. -Instrumentations are set up automatically and do not require any code changes. - -In addition to APM and error data, -the Python agent also collects system and application metrics in regular intervals. -This collection happens in a background thread that is started by the agent. - -More detailed information on how the Agent works can be found in the <>. - -[float] -[[additional-components]] -=== Additional components - -APM Agents work in conjunction with the {apm-guide-ref}/index.html[APM Server], {ref}/index.html[Elasticsearch], and {kibana-ref}/index.html[Kibana]. -The {apm-guide-ref}/index.html[APM Guide] provides details on how these components work together, -and provides a matrix outlining {apm-guide-ref}/agent-server-compatibility.html[Agent and Server compatibility]. 
diff --git a/docs/grpc.asciidoc b/docs/grpc.asciidoc deleted file mode 100644 index 4b79e15f0..000000000 --- a/docs/grpc.asciidoc +++ /dev/null @@ -1,65 +0,0 @@ -[[grpc-support]] -=== GRPC Support - -Incorporating Elastic APM into your GRPC project only requires a few easy -steps. - -NOTE: currently, only unary-unary RPC calls are instrumented. Streaming requests or responses are not captured. - -[float] -[[grpc-installation]] -==== Installation - -Install the Elastic APM agent using pip: - -[source,bash] ----- -$ pip install elastic-apm ----- - -or add `elastic-apm` to your project's `requirements.txt` file. - - -[float] -[[grpc-setup]] -==== Setup - -Elastic APM can be used both in GRPC server apps, and in GRPC client apps. - -[float] -[[grpc-setup-client]] -===== GRPC Client - -If you use one of our <>, no further steps are needed. - -For other use cases, see <>. -To ensure that our instrumentation is in place, call `elasticapm.instrument()` *before* creating any GRPC channels. - -[float] -[[grpc-setup-server]] -===== GRPC Server - -To set up the agent, you need to initialize it with appropriate settings. - -The settings are configured either via environment variables, or as -initialization arguments. - -You can find a list of all available settings in the -<> page. - -To initialize the agent for your application using environment variables: - -[source,python] ----- -import elasticapm -from elasticapm.contrib.grpc import GRPCApmClient - -elasticapm.instrument() - -client = GRPCApmClient(service_name="my-grpc-server") ----- - - -Once you have configured the agent, it will automatically track transactions -and capture uncaught exceptions within GRPC requests. 
- diff --git a/docs/how-the-agent-works.asciidoc b/docs/how-the-agent-works.asciidoc deleted file mode 100644 index 796815144..000000000 --- a/docs/how-the-agent-works.asciidoc +++ /dev/null @@ -1,72 +0,0 @@ -[[how-the-agent-works]] -=== How the Agent works - -To gather APM events (called transactions and spans), errors and metrics, -the Python agent instruments your application in a few different ways. -These events, are then sent to the APM Server. -The APM Server converts them to a format suitable for Elasticsearch, and sends them to an Elasticsearch cluster. -You can then use the APM app in Kibana to gain insight into latency issues and error culprits within your application. - -Broadly, we differentiate between three different approaches to collect the necessary data: -framework integration, instrumentation, and background collection. - -[float] -[[how-it-works-framework-integration]] -==== Framework integration - -To collect data about incoming requests and background tasks, -we integrate with frameworks like <>, <> and Celery. -Whenever possible, framework integrations make use of hooks and signals provided by the framework. -Examples of this are: - - * `request_started`, `request_finished`, and `got_request_exception` signals from `django.core.signals` - * `request_started`, `request_finished`, and `got_request_exception` signals from `flask.signals` - * `task_prerun`, `task_postrun`, and `task_failure` signals from `celery.signals` - -Framework integrations require some limited code changes in your app. -E.g. for Django, you need to add `elasticapm.contrib.django` to `INSTALLED_APPS`. - -[float] -[[how-it-works-no-framework]] -==== What if you are not using a framework - -If you're not using a supported framework, for example, a simple Python script, you can still -leverage the agent's <>. Check out -our docs on <>. 
- -[float] -[[how-it-works-instrumentation]] -==== Instrumentation - -To collect data from database drivers, HTTP libraries etc., -we instrument certain functions and methods in these libraries. -Our instrumentation wraps these callables and collects additional data, like - - * time spent in the call - * the executed query for database drivers - * the fetched URL for HTTP libraries - -We use a 3rd party library, https://github.com/GrahamDumpleton/wrapt[`wrapt`], to wrap the callables. -You can read more on how `wrapt` works in Graham Dumpleton's -excellent series of http://blog.dscpl.com.au/search/label/wrapt[blog posts]. - -Instrumentations are set up automatically and do not require any code changes. -See <> to learn more about which libraries we support. - -[float] -[[how-it-works-background-collection]] -==== Background collection - -In addition to APM and error data, -the Python agent also collects system and application metrics in regular intervals. -This collection happens in a background thread that is started by the agent. - -In addition to the metrics collection background thread, -the agent starts two additional threads per process: - - * a thread to regularly fetch remote configuration from the APM Server - * a thread to process the collected data and send it to the APM Server via HTTP. - -Note that every process that instantiates the agent will have these three threads. -This means that when you e.g. use gunicorn or uwsgi workers, -each worker will have three threads started by the Python agent. 
diff --git a/docs/index.asciidoc b/docs/index.asciidoc deleted file mode 100644 index 544415367..000000000 --- a/docs/index.asciidoc +++ /dev/null @@ -1,40 +0,0 @@ -include::{asciidoc-dir}/../../shared/versions/stack/current.asciidoc[] -include::{asciidoc-dir}/../../shared/attributes.asciidoc[] - -ifdef::env-github[] -NOTE: For the best reading experience, -please view this documentation at https://www.elastic.co/guide/en/apm/agent/python/current/index.html[elastic.co] -endif::[] - -= APM Python Agent Reference - -NOTE: Python 2.7 reached End of Life on January 1, 2020. -The Elastic APM agent will stop supporting Python 2.7 starting in version 6.0.0. - -include::./getting-started.asciidoc[] - -include::./set-up.asciidoc[] - -include::./supported-technologies.asciidoc[] - -include::./configuration.asciidoc[] - -include::./advanced-topics.asciidoc[] - -include::./api.asciidoc[] - -include::./metrics.asciidoc[] - -include::./opentelemetry.asciidoc[] - -include::./logging.asciidoc[] - -include::./tuning.asciidoc[] - -include::./troubleshooting.asciidoc[] - -include::./upgrading.asciidoc[] - -include::./release-notes.asciidoc[] - -include::./redirects.asciidoc[] diff --git a/docs/lambda/configure-lambda-widget.asciidoc b/docs/lambda/configure-lambda-widget.asciidoc deleted file mode 100644 index 9763f49f8..000000000 --- a/docs/lambda/configure-lambda-widget.asciidoc +++ /dev/null @@ -1,118 +0,0 @@ -++++ -
-
- - - - - - -
-
-++++ - -include::configure-lambda.asciidoc[tag=console-{layer-section-type}] - -++++ -
- - - - - -
-++++ \ No newline at end of file diff --git a/docs/lambda/configure-lambda.asciidoc b/docs/lambda/configure-lambda.asciidoc deleted file mode 100644 index 09377dcee..000000000 --- a/docs/lambda/configure-lambda.asciidoc +++ /dev/null @@ -1,113 +0,0 @@ -// tag::console-with-agent[] - -To configure APM through the AWS Management Console: - -1. Navigate to your function in the AWS Management Console -2. Click on the _Configuration_ tab -3. Click on _Environment variables_ -4. Add the following required variables: - -[source,bash] ----- -AWS_LAMBDA_EXEC_WRAPPER = /opt/python/bin/elasticapm-lambda # use this exact fixed value -ELASTIC_APM_LAMBDA_APM_SERVER = # this is your APM Server URL -ELASTIC_APM_SECRET_TOKEN = # this is your APM secret token -ELASTIC_APM_SEND_STRATEGY = background <1> ----- - --- -include::{apm-aws-lambda-root}/docs/images/images.asciidoc[tag=python-env-vars] --- - -// end::console-with-agent[] - -// tag::cli-with-agent[] - -To configure APM through the AWS command line interface execute the following command: - -[source,bash] ----- -aws lambda update-function-configuration --function-name yourLambdaFunctionName \ - --environment "Variables={AWS_LAMBDA_EXEC_WRAPPER=/opt/python/bin/elasticapm-lambda,ELASTIC_APM_LAMBDA_APM_SERVER=,ELASTIC_APM_SECRET_TOKEN=,ELASTIC_APM_SEND_STRATEGY=background}" <1> ----- - -// end::cli-with-agent[] - -// tag::sam-with-agent[] - -In your SAM `template.yml` file configure the following environment variables: - -[source,yml] ----- -... -Resources: - yourLambdaFunction: - Type: AWS::Serverless::Function - Properties: - ... - Environment: - Variables: - AWS_LAMBDA_EXEC_WRAPPER: /opt/python/bin/elasticapm-lambda - ELASTIC_APM_LAMBDA_APM_SERVER: - ELASTIC_APM_SECRET_TOKEN: - ELASTIC_APM_SEND_STRATEGY: background <1> -... ----- - -// end::sam-with-agent[] - -// tag::serverless-with-agent[] - -In your `serverless.yml` file configure the following environment variables: - -[source,yml] ----- -... 
-functions: - yourLambdaFunction: - ... - environment: - AWS_LAMBDA_EXEC_WRAPPER: /opt/python/bin/elasticapm-lambda - ELASTIC_APM_LAMBDA_APM_SERVER: - ELASTIC_APM_SECRET_TOKEN: - ELASTIC_APM_SEND_STRATEGY: background <1> -... ----- - -// end::serverless-with-agent[] - -// tag::terraform-with-agent[] -In your Terraform file configure the following environment variables: - -[source,terraform] ----- -... -resource "aws_lambda_function" "your_lambda_function" { - ... - environment { - variables = { - AWS_LAMBDA_EXEC_WRAPPER = /opt/python/bin/elasticapm-lambda - ELASTIC_APM_LAMBDA_APM_SERVER = "" - ELASTIC_APM_SECRET_TOKEN = "" - ELASTIC_APM_SEND_STRATEGY = "background" <1> - } - } -} -... ----- - -// end::terraform-with-agent[] - -// tag::container-with-agent[] -Environment variables configured for an AWS Lambda function are passed to the container running the lambda function. -You can use one of the other options (through AWS Web Console, AWS CLI, etc.) to configure the following environment variables: - -[source,bash] ----- -AWS_LAMBDA_EXEC_WRAPPER = /opt/python/bin/elasticapm-lambda # use this exact fixed value -ELASTIC_APM_LAMBDA_APM_SERVER = # this is your APM Server URL -ELASTIC_APM_SECRET_TOKEN = # this is your APM secret token -ELASTIC_APM_SEND_STRATEGY = background <1> ----- - -// end::container-with-agent[] diff --git a/docs/lambda/python-arn-replacement.asciidoc b/docs/lambda/python-arn-replacement.asciidoc deleted file mode 100644 index 24d9d1a7f..000000000 --- a/docs/lambda/python-arn-replacement.asciidoc +++ /dev/null @@ -1,9 +0,0 @@ -++++ - -++++ \ No newline at end of file diff --git a/docs/logging.asciidoc b/docs/logging.asciidoc deleted file mode 100644 index 9e0b24922..000000000 --- a/docs/logging.asciidoc +++ /dev/null @@ -1,190 +0,0 @@ -[[logs]] -== Logs - -Elastic Python APM Agent provides the following log features: - -- <> : Automatically inject correlation IDs that allow navigation between logs, traces and services. 
-- <> : Automatically reformat plaintext logs in {ecs-logging-ref}/intro.html[ECS logging] format. - -Those features are part of {observability-guide}/application-logs.html[Application log ingestion strategies]. - -The {ecs-logging-python-ref}/intro.html[`ecs-logging-python`] library can also be used to use the {ecs-logging-ref}/intro.html[ECS logging] format without an APM agent. -When deployed with the Python APM agent, the agent will provide <> IDs. - -[float] -[[log-correlation-ids]] -=== Log correlation - -{apm-guide-ref}/log-correlation.html[Log correlation] allows you to navigate to all logs belonging to a particular trace -and vice-versa: for a specific log, see in which context it has been logged and which parameters the user provided. - -The Agent provides integrations with both the default Python logging library, -as well as http://www.structlog.org/en/stable/[`structlog`]. - -* <> -* <> - -[float] -[[logging-integrations]] -==== Logging integrations - -[float] -[[logging]] -===== `logging` - -For Python 3.2+, we use https://docs.python.org/3/library/logging.html#logging.setLogRecordFactory[`logging.setLogRecordFactory()`] -to decorate the default LogRecordFactory to automatically add new attributes to -each LogRecord object: - -* `elasticapm_transaction_id` -* `elasticapm_trace_id` -* `elasticapm_span_id` - -This factory also adds these fields to a dictionary attribute, -`elasticapm_labels`, using the official ECS https://www.elastic.co/guide/en/ecs/current/ecs-tracing.html[tracing fields]. - -You can disable this automatic behavior by using the -<> setting -in your configuration. 
- -For Python versions <3.2, we also provide a -https://docs.python.org/3/library/logging.html#filter-objects[filter] which will -add the same new attributes to any filtered `LogRecord`: - -[source,python] ----- -import logging -from elasticapm.handlers.logging import LoggingFilter - -console = logging.StreamHandler() -console.addFilter(LoggingFilter()) -# add the handler to the root logger -logging.getLogger("").addHandler(console) ----- - -NOTE: Because https://docs.python.org/3/library/logging.html#filter-objects[filters -are not propagated to descendent loggers], you should add the filter to each of -your log handlers, as handlers are propagated, along with their attached filters. - -[float] -[[structlog]] -===== `structlog` - -We provide a http://www.structlog.org/en/stable/processors.html[processor] for -http://www.structlog.org/en/stable/[`structlog`] which will add three new keys -to the event_dict of any processed event: - -* `transaction.id` -* `trace.id` -* `span.id` - -[source,python] ----- -from structlog import PrintLogger, wrap_logger -from structlog.processors import JSONRenderer -from elasticapm.handlers.structlog import structlog_processor - -wrapped_logger = PrintLogger() -logger = wrap_logger(wrapped_logger, processors=[structlog_processor, JSONRenderer()]) -log = logger.new() -log.msg("some_event") ----- - -[float] -===== Use structlog for agent-internal logging - -The Elastic APM Python agent uses logging to log internal events and issues. -By default, it will use a `logging` logger. -If your project uses structlog, you can tell the agent to use a structlog logger -by setting the environment variable `ELASTIC_APM_USE_STRUCTLOG` to `true`. 
- -[float] -[[log-correlation-in-es]] -=== Log correlation in Elasticsearch - -In order to correlate logs from your app with transactions captured by the -Elastic APM Python Agent, your logs must contain one or more of the following -identifiers: - -* `transaction.id` -* `trace.id` -* `span.id` - -If you're using structured logging, either https://docs.python.org/3/howto/logging-cookbook.html#implementing-structured-logging[with a custom solution] -or with http://www.structlog.org/en/stable/[structlog] (recommended), then this -is fairly easy. Throw the http://www.structlog.org/en/stable/api.html#structlog.processors.JSONRenderer[JSONRenderer] -in, and use {blog-ref}structured-logging-filebeat[Filebeat] -to pull these logs into Elasticsearch. - -Without structured logging the task gets a little trickier. Here we -recommend first making sure your LogRecord objects have the elasticapm -attributes (see <>), and then you'll want to combine some specific -formatting with a Grok pattern, either in Elasticsearch using -{ref}/grok-processor.html[the grok processor], -or in {logstash-ref}/plugins-filters-grok.html[logstash with a plugin]. 
- -Say you have a https://docs.python.org/3/library/logging.html#logging.Formatter[Formatter] -that looks like this: - -[source,python] ----- -import logging - -fh = logging.FileHandler('spam.log') -formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") -fh.setFormatter(formatter) ----- - -You can add the APM identifiers by simply switching out the `Formatter` object -for the one that we provide: - -[source,python] ----- -import logging -from elasticapm.handlers.logging import Formatter - -fh = logging.FileHandler('spam.log') -formatter = Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") -fh.setFormatter(formatter) ----- - -This will automatically append apm-specific fields to your format string: - -[source,python] ----- -formatstring = "%(asctime)s - %(name)s - %(levelname)s - %(message)s" -formatstring = formatstring + " | elasticapm " \ - "transaction.id=%(elasticapm_transaction_id)s " \ - "trace.id=%(elasticapm_trace_id)s " \ - "span.id=%(elasticapm_span_id)s" ----- - -Then, you could use a grok pattern like this (for the -{ref}/grok-processor.html[Elasticsearch Grok Processor]): - -[source, json] ----- -{ - "description" : "...", - "processors": [ - { - "grok": { - "field": "message", - "patterns": ["%{GREEDYDATA:msg} | elasticapm transaction.id=%{DATA:transaction.id} trace.id=%{DATA:trace.id} span.id=%{DATA:span.id}"] - } - } - ] -} ----- - -[float] -[[log-reformatting]] -=== Log reformatting (experimental) - -Starting in version 6.16.0, the agent can automatically reformat application -logs to ECS format with no changes to dependencies. Prior versions must install -the `ecs_logging` dependency. - -Log reformatting is controlled by the <> configuration option, and is disabled by default. - -The reformatted logs will include both the <> IDs. 
diff --git a/docs/metrics.asciidoc b/docs/metrics.asciidoc deleted file mode 100644 index 2d7ae6216..000000000 --- a/docs/metrics.asciidoc +++ /dev/null @@ -1,215 +0,0 @@ -[[metrics]] -== Metrics - -With Elastic APM, you can capture system and process metrics. -These metrics will be sent regularly to the APM Server and from there to Elasticsearch - -[float] -[[metric-sets]] -=== Metric sets - -* <> -* <> -* <> -* <> - -[float] -[[cpu-memory-metricset]] -==== CPU/Memory metric set - -`elasticapm.metrics.sets.cpu.CPUMetricSet` - -This metric set collects various system metrics and metrics of the current process. - -NOTE: if you do *not* use Linux, you need to install https://pypi.org/project/psutil/[`psutil`] for this metric set. - - -*`system.cpu.total.norm.pct`*:: -+ --- -type: scaled_float - -format: percent - -The percentage of CPU time in states other than Idle and IOWait, normalized by the number of cores. --- - - -*`system.process.cpu.total.norm.pct`*:: -+ --- -type: scaled_float - -format: percent - -The percentage of CPU time spent by the process since the last event. -This value is normalized by the number of CPU cores and it ranges from 0 to 100%. --- - -*`system.memory.total`*:: -+ --- -type: long - -format: bytes - -Total memory. --- - -*`system.memory.actual.free`*:: -+ --- -type: long - -format: bytes - -Actual free memory in bytes. --- - -*`system.process.memory.size`*:: -+ --- -type: long - -format: bytes - -The total virtual memory the process has. --- - -*`system.process.memory.rss.bytes`*:: -+ --- -type: long - -format: bytes - -The Resident Set Size. The amount of memory the process occupied in main memory (RAM). --- - -[float] -[[cpu-memory-cgroup-metricset]] -===== Linux’s cgroup metrics - -*`system.process.cgroup.memory.mem.limit.bytes`*:: -+ --- -type: long - -format: bytes - -Memory limit for current cgroup slice. 
--- - -*`system.process.cgroup.memory.mem.usage.bytes`*:: -+ --- -type: long - -format: bytes - -Memory usage in current cgroup slice. --- - - -[float] -[[breakdown-metricset]] -==== Breakdown metric set - -NOTE: Tracking and collection of this metric set can be disabled using the <> setting. - -*`span.self_time`*:: -+ --- -type: simple timer - -This timer tracks the span self-times and is the basis of the transaction breakdown visualization. - -Fields: - -* `sum`: The sum of all span self-times in ms since the last report (the delta) -* `count`: The count of all span self-times since the last report (the delta) - -You can filter and group by these dimensions: - -* `transaction.name`: The name of the transaction -* `transaction.type`: The type of the transaction, for example `request` -* `span.type`: The type of the span, for example `app`, `template` or `db` -* `span.subtype`: The sub-type of the span, for example `mysql` (optional) - --- -[float] -[[prometheus-metricset]] -==== Prometheus metric set (beta) - -beta[] - -If you use https://github.com/prometheus/client_python[`prometheus_client`] to collect metrics, the agent can -collect them as well and make them available in Elasticsearch. - -The following types of metrics are supported: - - * Counters - * Gauges - * Summaries - * Histograms (requires APM Server / Elasticsearch / Kibana 7.14+) - -To use the Prometheus metric set, you have to enable it with the <> configuration option. - -All metrics collected from `prometheus_client` are prefixed with `"prometheus.metrics."`. This can be changed using the <> configuration option. - -[float] -[[prometheus-metricset-beta]] -===== Beta limitations - * The metrics format may change without backwards compatibility in future releases. - -[float] -[[custom-metrics]] -=== Custom Metrics - -Custom metrics allow you to send your own metrics to Elasticsearch. - -The most common way to send custom metrics is with the -<>. However, you can also use your -own metric set. 
If you collect the metrics manually in your code, you can use -the base `MetricSet` class: - -[source,python] ----- -from elasticapm.metrics.base_metrics import MetricSet - -client = elasticapm.Client() -metricset = client.metrics.register(MetricSet) - -for x in range(10): - metricset.counter("my_counter").inc() ----- - -Alternatively, you can create your own MetricSet class which inherits from the -base class. In this case, you'll usually want to override the `before_collect` -method, where you can gather and set metrics before they are collected and sent -to Elasticsearch. - -You can add your `MetricSet` class as shown in the example above, or you can -add an import string for your class to the <> -configuration option: - -[source,bash] ----- -ELASTIC_APM_METRICS_SETS="elasticapm.metrics.sets.cpu.CPUMetricSet,myapp.metrics.MyMetricSet" ----- - -Your MetricSet might look something like this: - -[source,python] ----- -from elasticapm.metrics.base_metrics import MetricSet - -class MyAwesomeMetricSet(MetricSet): - def before_collect(self): - self.gauge("my_gauge").set(myapp.some_value) ----- - -In the example above, the MetricSet would look up `myapp.some_value` and set -the metric `my_gauge` to that value. This would happen whenever metrics are -collected/sent, which is controlled by the -<> setting. \ No newline at end of file diff --git a/docs/opentelemetry.asciidoc b/docs/opentelemetry.asciidoc deleted file mode 100644 index 531fd09d4..000000000 --- a/docs/opentelemetry.asciidoc +++ /dev/null @@ -1,76 +0,0 @@ -[[opentelemetry-bridge]] -== OpenTelemetry API Bridge - -The Elastic APM OpenTelemetry bridge allows you to create Elastic APM `Transactions` and `Spans`, -using the OpenTelemetry API. This allows users to utilize the Elastic APM agent's -automatic instrumentations, while keeping custom instrumentations vendor neutral. 
- -If a span is created while there is no transaction active, it will result in an -Elastic APM {apm-guide-ref}/data-model-transactions.html[`Transaction`]. Inner spans -are mapped to Elastic APM {apm-guide-ref}/data-model-spans.html[`Span`]. - -[float] -[[opentelemetry-getting-started]] -=== Getting started -The first step in getting started with the OpenTelemetry bridge is to install the `opentelemetry` libraries: - -[source,bash] ----- -pip install elastic-apm[opentelemetry] ----- - -Or if you already have installed `elastic-apm`: - - -[source,bash] ----- -pip install opentelemetry-api opentelemetry-sdk ----- - - -[float] -[[opentelemetry-usage]] -=== Usage - -[source,python] ----- -from elasticapm.contrib.opentelemetry import Tracer - -tracer = Tracer(__name__); -with tracer.start_as_current_span("test"): - # Do some work ----- - -or - -[source,python] ----- -from elasticapm.contrib.opentelemetry import trace - -tracer = trace.get_tracer(__name__) -with tracer.start_as_current_span("test"): - # Do some work ----- - - -`Tracer` and `get_tracer()` accept the following optional arguments: - - * `elasticapm_client`: an already instantiated Elastic APM client - * `config`: a configuration dictionary, which will be used to instantiate a new Elastic APM client, - e.g. `{"SERVER_URL": "https://example.org"}`. See <> for more information. - -The `Tracer` object mirrors the upstream interface on the -https://opentelemetry-python.readthedocs.io/en/latest/api/trace.html#opentelemetry.trace.Tracer[OpenTelemetry `Tracer` object.] - - -[float] -[[opentelemetry-caveats]] -=== Caveats -Not all features of the OpenTelemetry API are supported. - -Processors, exporters, metrics, logs, span events, and span links are not supported. - -Additionally, due to implementation details, the global context API only works -when a span is included in the activated context, and tokens are not used. 
-Instead, the global context works as a stack, and when a context is detached the -previously-active context will automatically be activated. diff --git a/docs/redirects.asciidoc b/docs/redirects.asciidoc deleted file mode 100644 index c924b6efe..000000000 --- a/docs/redirects.asciidoc +++ /dev/null @@ -1,14 +0,0 @@ -["appendix",role="exclude",id="redirects"] -== Deleted pages - -The following pages have moved or been deleted. - -[role="exclude",id="opentracing-bridge"] -=== OpenTracing API - -Refer to <> instead. - -[role="exclude",id="log-correlation"] -=== Log correlation - -Refer to <> instead. diff --git a/docs/reference/advanced-topics.md b/docs/reference/advanced-topics.md new file mode 100644 index 000000000..1f251c886 --- /dev/null +++ b/docs/reference/advanced-topics.md @@ -0,0 +1,22 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/apm/agent/python/current/advanced-topics.html +applies_to: + stack: + serverless: + observability: + product: + apm_agent_python: ga +--- + +# Advanced topics [advanced-topics] + +* [Instrumenting custom code](/reference/instrumenting-custom-code.md) +* [Sanitizing data](/reference/sanitizing-data.md) +* [How the Agent works](/reference/how-agent-works.md) +* [Run Tests Locally](/reference/run-tests-locally.md) + + + + + diff --git a/docs/reference/aiohttp-server-support.md b/docs/reference/aiohttp-server-support.md new file mode 100644 index 000000000..77e8c25e3 --- /dev/null +++ b/docs/reference/aiohttp-server-support.md @@ -0,0 +1,118 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/apm/agent/python/current/aiohttp-server-support.html +applies_to: + stack: + serverless: + observability: + product: + apm_agent_python: ga +--- + +# Aiohttp Server support [aiohttp-server-support] + +Getting Elastic APM set up for your Aiohttp Server project is easy, and there are various ways you can tweak it to fit to your needs. 
+ + +## Installation [aiohttp-server-installation] + +Install the Elastic APM agent using pip: + +```bash +$ pip install elastic-apm +``` + +or add `elastic-apm` to your project’s `requirements.txt` file. + + +## Setup [aiohttp-server-setup] + +To set up the agent, you need to initialize it with appropriate settings. + +The settings are configured either via environment variables, the application’s settings, or as initialization arguments. + +You can find a list of all available settings in the [Configuration](/reference/configuration.md) page. + +To initialize the agent for your application using environment variables: + +```python +from aiohttp import web + +from elasticapm.contrib.aiohttp import ElasticAPM + +app = web.Application() + +apm = ElasticAPM(app) +``` + +To configure the agent using `ELASTIC_APM` in your application’s settings: + +```python +from aiohttp import web + +from elasticapm.contrib.aiohttp import ElasticAPM + +app = web.Application() + +app['ELASTIC_APM'] = { + 'SERVICE_NAME': '', + 'SECRET_TOKEN': '', +} +apm = ElasticAPM(app) +``` + + +## Usage [aiohttp-server-usage] + +Once you have configured the agent, it will automatically track transactions and capture uncaught exceptions within aiohttp. + +Capture an arbitrary exception by calling [`capture_exception`](/reference/api-reference.md#client-api-capture-exception): + +```python +try: + 1 / 0 +except ZeroDivisionError: + apm.client.capture_exception() +``` + +Log a generic message with [`capture_message`](/reference/api-reference.md#client-api-capture-message): + +```python +apm.client.capture_message('hello, world!') +``` + + +## Performance metrics [aiohttp-server-performance-metrics] + +If you’ve followed the instructions above, the agent has already installed our middleware. This will measure response times, as well as detailed performance data for all supported technologies. 
+ +::::{note} +due to the fact that `asyncio` drivers are usually separate from their synchronous counterparts, specific instrumentation is needed for all drivers. The support for asynchronous drivers is currently quite limited. +:::: + + + +### Ignoring specific routes [aiohttp-server-ignoring-specific-views] + +You can use the [`TRANSACTIONS_IGNORE_PATTERNS`](/reference/configuration.md#config-transactions-ignore-patterns) configuration option to ignore specific routes. The list given should be a list of regular expressions which are matched against the transaction name: + +```python +app['ELASTIC_APM'] = { + # ... + 'TRANSACTIONS_IGNORE_PATTERNS': ['^OPTIONS ', '/api/'] + # ... +} +``` + +This would ignore any requests using the `OPTIONS` method and any requests containing `/api/`. + + +## Supported aiohttp and Python versions [supported-aiohttp-and-python-versions] + +A list of supported [aiohttp](/reference/supported-technologies.md#supported-aiohttp) and [Python](/reference/supported-technologies.md#supported-python) versions can be found on our [Supported Technologies](/reference/supported-technologies.md) page. + +::::{note} +Elastic APM only supports `asyncio` when using Python 3.7+ +:::: + + diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md new file mode 100644 index 000000000..c99a840a0 --- /dev/null +++ b/docs/reference/api-reference.md @@ -0,0 +1,515 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/apm/agent/python/current/api.html +applies_to: + stack: + serverless: + observability: + product: + apm_agent_python: ga +--- + +# API reference [api] + +The Elastic APM Python agent has several public APIs. Most of the public API functionality is not needed when using one of our [supported frameworks](/reference/supported-technologies.md#framework-support), but they allow customized usage. + + +## Client API [client-api] + +The public Client API consists of several methods on the `Client` class. 
This API can be used to track exceptions and log messages, as well as to mark the beginning and end of transactions. + + +### Instantiation [client-api-init] + +```{applies_to} +apm_agent_python: ga 1.0.0 +``` + +To create a `Client` instance, import it and call its constructor: + +```python +from elasticapm import Client + +client = Client({'SERVICE_NAME': 'example'}, **defaults) +``` + +* `config`: A dictionary, with key/value configuration. For the possible configuration keys, see [Configuration](/reference/configuration.md). +* `**defaults`: default values for configuration. These can be omitted in most cases, and take the least precedence. + +::::{note} +framework integrations like [Django](/reference/django-support.md) and [Flask](/reference/flask-support.md) instantiate the client automatically. +:::: + + + +#### `elasticapm.get_client()` [api-get-client] + +```{applies_to} +apm_agent_python: ga 6.1.0 +``` + +Retrieves the `Client` singleton. This is useful for many framework integrations, where the client is instantiated automatically. + +```python +client = elasticapm.get_client() +client.capture_message('foo') +``` + + +### Errors [error-api] + + +#### `Client.capture_exception()` [client-api-capture-exception] + +```{applies_to} +apm_agent_python: ga 1.0.0 +``` + +`handled` added in v2.0.0. + +Captures an exception object: + +```python +try: + x = int("five") +except ValueError: + client.capture_exception() +``` + +* `exc_info`: A `(type, value, traceback)` tuple as returned by [`sys.exc_info()`](https://docs.python.org/3/library/sys.html#sys.exc_info). If not provided, it will be captured automatically. +* `date`: A `datetime.datetime` object representing the occurrence time of the error. If left empty, it defaults to `datetime.datetime.utcnow()`. +* `context`: A dictionary with contextual information. This dictionary must follow the [Context](docs-content://solutions/observability/apm/elastic-apm-events-intake-api.md#apm-api-error) schema definition. 
+* `custom`: A dictionary of custom data you want to attach to the event. +* `handled`: A boolean to indicate if this exception was handled or not. + +Returns the id of the error as a string. + + +#### `Client.capture_message()` [client-api-capture-message] + +```{applies_to} +apm_agent_python: ga 1.0.0 +``` + +Captures a message with optional added contextual data. Example: + +```python +client.capture_message('Billing process succeeded.') +``` + +* `message`: The message as a string. +* `param_message`: Alternatively, a parameterized message as a dictionary. The dictionary contains two values: `message`, and `params`. This allows the APM Server to group messages together that share the same parameterized message. Example: + + ```python + client.capture_message(param_message={ + 'message': 'Billing process for %s succeeded. Amount: %s', + 'params': (customer.id, order.total_amount), + }) + ``` + +* `stack`: If set to `True` (the default), a stacktrace from the call site will be captured. +* `exc_info`: A `(type, value, traceback)` tuple as returned by [`sys.exc_info()`](https://docs.python.org/3/library/sys.html#sys.exc_info). If not provided, it will be captured automatically, if `capture_message()` was called in an `except` block. +* `date`: A `datetime.datetime` object representing the occurrence time of the error. If left empty, it defaults to `datetime.datetime.utcnow()`. +* `context`: A dictionary with contextual information. This dictionary must follow the [Context](docs-content://solutions/observability/apm/elastic-apm-events-intake-api.md#apm-api-error) schema definition. +* `custom`: A dictionary of custom data you want to attach to the event. + +Returns the id of the message as a string. + +::::{note} +Either the `message` or the `param_message` argument is required. 
+:::: + + + +### Transactions [transaction-api] + + +#### `Client.begin_transaction()` [client-api-begin-transaction] + +```{applies_to} +apm_agent_python: ga 1.0.0 +``` + +`trace_parent` support added in v5.6.0. + +Begin tracking a transaction. Should be called e.g. at the beginning of a request or when starting a background task. Example: + +```python +client.begin_transaction('processors') +``` + +* `transaction_type`: (**required**) A string describing the type of the transaction, e.g. `'request'` or `'celery'`. +* `trace_parent`: (**optional**) A `TraceParent` object. See [TraceParent generation](#traceparent-api). +* `links`: (**optional**) A list of `TraceParent` objects to which this transaction is causally linked. + + +#### `Client.end_transaction()` [client-api-end-transaction] + +```{applies_to} +apm_agent_python: ga 1.0.0 +``` + +End tracking the transaction. Should be called e.g. at the end of a request or when ending a background task. Example: + +```python +client.end_transaction('myapp.billing_process', processor.status) +``` + +* `name`: (**optional**) A string describing the name of the transaction, e.g. `process_order`. This is typically the name of the view/controller that handles the request, or the route name. +* `result`: (**optional**) A string describing the result of the transaction. This is typically the HTTP status code, or e.g. `'success'` for a background task. + +::::{note} +if `name` and `result` are not set in the `end_transaction()` call, they have to be set beforehand by calling [`elasticapm.set_transaction_name()`](#api-set-transaction-name) and [`elasticapm.set_transaction_result()`](#api-set-transaction-result) during the transaction. +:::: + + + +### `TraceParent` [traceparent-api] + +Transactions can be started with a `TraceParent` object. This creates a transaction that is a child of the `TraceParent`, which is essential for distributed tracing. 
+ + +#### `elasticapm.trace_parent_from_string()` [api-traceparent-from-string] + +```{applies_to} +apm_agent_python: ga 5.6.0 +``` + +Create a `TraceParent` object from the string representation generated by `TraceParent.to_string()`: + +```python +parent = elasticapm.trace_parent_from_string('00-03d67dcdd62b7c0f7a675424347eee3a-5f0e87be26015733-01') +client.begin_transaction('processors', trace_parent=parent) +``` + +* `traceparent_string`: (**required**) A string representation of a `TraceParent` object. + + +#### `elasticapm.trace_parent_from_headers()` [api-traceparent-from-headers] + +```{applies_to} +apm_agent_python: ga 5.6.0 +``` + +Create a `TraceParent` object from HTTP headers (usually generated by another Elastic APM agent): + +```python +parent = elasticapm.trace_parent_from_headers(headers_dict) +client.begin_transaction('processors', trace_parent=parent) +``` + +* `headers`: (**required**) HTTP headers formed as a dictionary. + + +#### `elasticapm.get_trace_parent_header()` [api-traceparent-get-header] + +```{applies_to} +apm_agent_python: ga 5.10.0 +``` + +Return the string representation of the current transaction `TraceParent` object: + +```python +elasticapm.get_trace_parent_header() +``` + + +## Other APIs [api-other] + + +### `elasticapm.instrument()` [api-elasticapm-instrument] + +```{applies_to} +apm_agent_python: ga 1.0.0 +``` + +Instruments libraries automatically. This includes a wide range of standard library and 3rd party modules. A list of instrumented modules can be found in `elasticapm.instrumentation.register`. This function should be called as early as possible in the startup of your application. For [supported frameworks](/reference/supported-technologies.md#framework-support), this is called automatically. 
Example: + +```python +import elasticapm + +elasticapm.instrument() +``` + + +### `elasticapm.set_transaction_name()` [api-set-transaction-name] + +```{applies_to} +apm_agent_python: ga 1.0.0 +``` + +Set the name of the current transaction. For supported frameworks, the transaction name is determined automatically, and can be overridden using this function. Example: + +```python +import elasticapm + +elasticapm.set_transaction_name('myapp.billing_process') +``` + +* `name`: (**required**) A string describing the name of the transaction +* `override`: if `True` (the default), overrides any previously set transaction name. If `False`, only sets the name if the transaction name hasn’t already been set. + + +### `elasticapm.set_transaction_result()` [api-set-transaction-result] + +```{applies_to} +apm_agent_python: ga 2.2.0 +``` + +Set the result of the current transaction. For supported frameworks, the transaction result is determined automatically, and can be overridden using this function. Example: + +```python +import elasticapm + +elasticapm.set_transaction_result('SUCCESS') +``` + +* `result`: (**required**) A string describing the result of the transaction, e.g. `HTTP 2xx` or `SUCCESS` +* `override`: if `True` (the default), overrides any previously set result. If `False`, only sets the result if the result hasn’t already been set. + + +### `elasticapm.set_transaction_outcome()` [api-set-transaction-outcome] + +```{applies_to} +apm_agent_python: ga 5.9.0 +``` + +Sets the outcome of the transaction. The value can either be `"success"`, `"failure"` or `"unknown"`. This should only be called at the end of a transaction after the outcome is determined. + +The `outcome` is used for error rate calculations. `success` denotes that a transaction has concluded successfully, while `failure` indicates that the transaction failed to finish successfully. If the `outcome` is set to `unknown`, the transaction will not be included in error rate calculations. 
+ +For supported web frameworks, the transaction outcome is set automatically if it has not been set yet, based on the HTTP status code. A status code below `500` is considered a `success`, while any value of `500` or higher is counted as a `failure`. + +If your transaction results in an HTTP response, you can alternatively provide the HTTP status code. + +::::{note} +While the `outcome` and `result` fields look very similar, they serve different purposes. Unlike the `result` field, which can hold an arbitrary string value, `outcome` is limited to three different values, `"success"`, `"failure"` and `"unknown"`. This allows the APM app to perform error rate calculations on these values. +:::: + + +Example: + +```python +import elasticapm + +elasticapm.set_transaction_outcome("success") + +# Using an HTTP status code +elasticapm.set_transaction_outcome(http_status_code=200) + +# Using predefined constants: + +from elasticapm.conf.constants import OUTCOME + +elasticapm.set_transaction_outcome(OUTCOME.SUCCESS) +elasticapm.set_transaction_outcome(OUTCOME.FAILURE) +elasticapm.set_transaction_outcome(OUTCOME.UNKNOWN) +``` + +* `outcome`: One of `"success"`, `"failure"` or `"unknown"`. Can be omitted if `http_status_code` is provided. +* `http_status_code`: if the transaction represents an HTTP response, its status code can be provided to determine the `outcome` automatically. +* `override`: if `True` (the default), any previously set `outcome` will be overridden. If `False`, the outcome will only be set if it was not set before. + + +### `elasticapm.get_transaction_id()` [api-get-transaction-id] + +```{applies_to} +apm_agent_python: ga 5.2.0 +``` + +Get the id of the current transaction. Example: + +```python +import elasticapm + +transaction_id = elasticapm.get_transaction_id() +``` + + +### `elasticapm.get_trace_id()` [api-get-trace-id] + +```{applies_to} +apm_agent_python: ga 5.2.0 +``` + +Get the `trace_id` of the current transaction’s trace. 
Example: + +```python +import elasticapm + +trace_id = elasticapm.get_trace_id() +``` + + +### `elasticapm.get_span_id()` [api-get-span-id] + +```{applies_to} +apm_agent_python: ga 5.2.0 +``` + +Get the id of the current span. Example: + +```python +import elasticapm + +span_id = elasticapm.get_span_id() +``` + + +### `elasticapm.set_custom_context()` [api-set-custom-context] + +```{applies_to} +apm_agent_python: ga 2.0.0 +``` + +Attach custom contextual data to the current transaction and errors. Supported frameworks will automatically attach information about the HTTP request and the logged in user. You can attach further data using this function. + +::::{tip} +Before using custom context, ensure you understand the different types of [metadata](docs-content://solutions/observability/apm/metadata.md) that are available. +:::: + + +Example: + +```python +import elasticapm + +elasticapm.set_custom_context({'billing_amount': product.price * item_count}) +``` + +* `data`: (**required**) A dictionary with the data to be attached. This should be a flat key/value `dict` object. + +::::{note} +`.`, `*`, and `"` are invalid characters for key names and will be replaced with `_`. +:::: + + +Errors that happen after this call will also have the custom context attached to them. You can call this function multiple times, new context data will be merged with existing data, following the `update()` semantics of Python dictionaries. + + +### `elasticapm.set_user_context()` [api-set-user-context] + +```{applies_to} +apm_agent_python: ga 2.0.0 +``` + +Attach information about the currently logged in user to the current transaction and errors. Example: + +```python +import elasticapm + +elasticapm.set_user_context(username=user.username, email=user.email, user_id=user.id) +``` + +* `username`: The username of the logged in user +* `email`: The email of the logged in user +* `user_id`: The unique identifier of the logged in user, e.g. 
the primary key value + +Errors that happen after this call will also have the user context attached to them. You can call this function multiple times, new user data will be merged with existing data, following the `update()` semantics of Python dictionaries. + + +### `elasticapm.capture_span` [api-capture-span] + +```{applies_to} +apm_agent_python: ga 4.1.0 +``` + +Capture a custom span. This can be used either as a function decorator or as a context manager (in a `with` statement). When used as a decorator, the name of the span will be set to the name of the function. When used as a context manager, a name has to be provided. + +```python +import elasticapm + +@elasticapm.capture_span() +def coffee_maker(strength): + fetch_water() + + with elasticapm.capture_span('near-to-machine', labels={"type": "arabica"}): + insert_filter() + for i in range(strength): + pour_coffee() + + start_drip() + + fresh_pots() +``` + +* `name`: The name of the span. Defaults to the function name if used as a decorator. +* `span_type`: (**optional**) The type of the span, usually in a dot-separated hierarchy of `type`, `subtype`, and `action`, e.g. `db.mysql.query`. Alternatively, type, subtype and action can be provided as three separate arguments, see `span_subtype` and `span_action`. +* `skip_frames`: (**optional**) The number of stack frames to skip when collecting stack traces. Defaults to `0`. +* `leaf`: (**optional**) if `True`, all spans nested below this span will be ignored. Defaults to `False`. +* `labels`: (**optional**) a dictionary of labels. Keys must be strings, values can be strings, booleans, or numerical (`int`, `float`, `decimal.Decimal`). Defaults to `None`. +* `span_subtype`: (**optional**) subtype of the span, e.g. name of the database. Defaults to `None`. +* `span_action`: (**optional**) action of the span, e.g. `query`. Defaults to `None`. +* `links`: (**optional**) A list of `TraceParent` objects to which this span is causally linked. 
+ + +### `elasticapm.async_capture_span` [api-async-capture-span] + +```{applies_to} +apm_agent_python: ga 5.4.0 +``` + +Capture a custom async-aware span. This can be used either as a function decorator or as a context manager (in an `async with` statement). When used as a decorator, the name of the span will be set to the name of the function. When used as a context manager, a name has to be provided. + +```python +import elasticapm + +@elasticapm.async_capture_span() +async def coffee_maker(strength): + await fetch_water() + + async with elasticapm.async_capture_span('near-to-machine', labels={"type": "arabica"}): + await insert_filter() + async for i in range(strength): + await pour_coffee() + + start_drip() + + fresh_pots() +``` + +* `name`: The name of the span. Defaults to the function name if used as a decorator. +* `span_type`: (**optional**) The type of the span, usually in a dot-separated hierarchy of `type`, `subtype`, and `action`, e.g. `db.mysql.query`. Alternatively, type, subtype and action can be provided as three separate arguments, see `span_subtype` and `span_action`. +* `skip_frames`: (**optional**) The number of stack frames to skip when collecting stack traces. Defaults to `0`. +* `leaf`: (**optional**) if `True`, all spans nested below this span will be ignored. Defaults to `False`. +* `labels`: (**optional**) a dictionary of labels. Keys must be strings, values can be strings, booleans, or numerical (`int`, `float`, `decimal.Decimal`). Defaults to `None`. +* `span_subtype`: (**optional**) subtype of the span, e.g. name of the database. Defaults to `None`. +* `span_action`: (**optional**) action of the span, e.g. `query`. Defaults to `None`. +* `links`: (**optional**) A list of `TraceParent` objects to which this span is causally linked. + +::::{note} +`asyncio` is only supported for Python 3.7+. 
+:::: + + + +### `elasticapm.label()` [api-label] + +```{applies_to} +apm_agent_python: ga 5.0.0 +``` + +Attach labels to the current transaction and errors. + +::::{tip} +Before using custom labels, ensure you understand the different types of [metadata](docs-content://solutions/observability/apm/metadata.md) that are available. +:::: + + +Example: + +```python +import elasticapm + +elasticapm.label(ecommerce=True, dollar_value=47.12) +``` + +Errors that happen after this call will also have the labels attached to them. You can call this function multiple times, new labels will be merged with existing labels, following the `update()` semantics of Python dictionaries. + +Keys must be strings, values can be strings, booleans, or numerical (`int`, `float`, `decimal.Decimal`). `.`, `*`, and `"` are invalid characters for label names and will be replaced with `_`. + +::::{warning} +Avoid defining too many user-specified labels. Defining too many unique fields in an index is a condition that can lead to a [mapping explosion](docs-content://manage-data/data-store/mapping.md#mapping-limit-settings). +:::: + + diff --git a/docs/reference/asgi-middleware.md b/docs/reference/asgi-middleware.md new file mode 100644 index 000000000..ef33234ec --- /dev/null +++ b/docs/reference/asgi-middleware.md @@ -0,0 +1,72 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/apm/agent/python/current/asgi-middleware.html +applies_to: + stack: + serverless: + observability: + product: + apm_agent_python: preview +--- + +# ASGI Middleware [asgi-middleware] + +::::{warning} +This functionality is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features. +:::: + + +Incorporating Elastic APM into your ASGI-based project only requires a few easy steps. + +::::{note} +Several ASGI frameworks are supported natively. 
Please check [Supported Technologies](/reference/supported-technologies.md) for more information +:::: + + + +## Installation [asgi-installation] + +Install the Elastic APM agent using pip: + +```bash +$ pip install elastic-apm +``` + +or add `elastic-apm` to your project’s `requirements.txt` file. + + +## Setup [asgi-setup] + +To set up the agent, you need to initialize it with appropriate settings. + +The settings are configured either via environment variables, or as initialization arguments. + +You can find a list of all available settings in the [Configuration](/reference/configuration.md) page. + +To set up the APM agent, wrap your ASGI app with the `ASGITracingMiddleware`: + +```python +from elasticapm.contrib.asgi import ASGITracingMiddleware + +app = MyGenericASGIApp() # depending on framework + +app = ASGITracingMiddleware(app) +``` + +Make sure to call [`elasticapm.set_transaction_name()`](/reference/api-reference.md#api-set-transaction-name) with an appropriate transaction name in all your routes. + +::::{note} +Currently, the agent doesn’t support automatic capturing of exceptions. You can follow progress on this issue on [Github](https://github.com/elastic/apm-agent-python/issues/1548). +:::: + + + +## Supported Python versions [supported-python-versions] + +A list of supported [Python](/reference/supported-technologies.md#supported-python) versions can be found on our [Supported Technologies](/reference/supported-technologies.md) page. 
+ +::::{note} +Elastic APM only supports `asyncio` when using Python 3.7+ +:::: + + diff --git a/docs/reference/azure-functions-support.md b/docs/reference/azure-functions-support.md new file mode 100644 index 000000000..a2571bb6e --- /dev/null +++ b/docs/reference/azure-functions-support.md @@ -0,0 +1,59 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/apm/agent/python/current/azure-functions-support.html +applies_to: + stack: + serverless: + observability: + product: + apm_agent_python: ga +--- + +# Monitoring Azure Functions [azure-functions-support] + + +## Prerequisites [_prerequisites_2] + +You need an APM Server to which you can send APM data. Follow the [APM Quick start](docs-content://solutions/observability/apm/get-started-fleet-managed-apm-server.md) if you have not set one up yet. For the best-possible performance, we recommend setting up APM on {{ecloud}} in the same Azure region as your Azure Functions app. + +::::{note} +Currently, only HTTP and timer triggers are supported. Other trigger types may be captured as well, but the amount of captured contextual data may differ. +:::: + + + +## Step 1: Enable Worker Extensions [_step_1_enable_worker_extensions] + +Elastic APM uses [Worker Extensions](https://learn.microsoft.com/en-us/azure/azure-functions/functions-reference-python?tabs=asgi%2Capplication-level&pivots=python-mode-configuration#python-worker-extensions) to instrument Azure Functions. This feature is not enabled by default, and must be enabled in your Azure Functions App. Please follow the instructions in the [Azure docs](https://learn.microsoft.com/en-us/azure/azure-functions/functions-reference-python?tabs=asgi%2Capplication-level&pivots=python-mode-configuration#using-extensions). 
+ +Once you have enabled Worker Extensions, these two lines of code will enable Elastic APM’s extension: + +```python +from elasticapm.contrib.serverless.azure import ElasticAPMExtension + +ElasticAPMExtension.configure() +``` + +Put them somewhere at the top of your Python file, before the function definitions. + + +## Step 2: Install the APM Python Agent [_step_2_install_the_apm_python_agent] + +You need to add `elastic-apm` as a dependency for your Functions app. Simply add `elastic-apm` to your `requirements.txt` file. We recommend pinning the version to the current newest version of the agent, and periodically updating the version. + + +## Step 3: Configure APM on Azure Functions [_step_3_configure_apm_on_azure_functions] + +The APM Python agent is configured through [App Settings](https://learn.microsoft.com/en-us/azure/azure-functions/functions-how-to-use-azure-function-app-settings?tabs=portal#settings). These are then picked up by the agent as environment variables. + +For the minimal configuration, you will need the [`ELASTIC_APM_SERVER_URL`](/reference/configuration.md#config-server-url) to set the destination for APM data and a [`ELASTIC_APM_SECRET_TOKEN`](/reference/configuration.md#config-secret-token). If you prefer to use an [APM API key](docs-content://solutions/observability/apm/api-keys.md) instead of the APM secret token, use the [`ELASTIC_APM_API_KEY`](/reference/configuration.md#config-api-key) environment variable instead of `ELASTIC_APM_SECRET_TOKEN` in the following example configuration. + +```bash +$ az functionapp config appsettings set --settings ELASTIC_APM_SERVER_URL=https://example.apm.northeurope.azure.elastic-cloud.com:443 +$ az functionapp config appsettings set --settings ELASTIC_APM_SECRET_TOKEN=verysecurerandomstring +``` + +You can optionally [fine-tune the Python agent](/reference/configuration.md). 
+ +That’s it; Once the agent is installed and working, spans will be captured for [supported technologies](/reference/supported-technologies.md). You can also use [`capture_span`](/reference/api-reference.md#api-capture-span) to capture custom spans, and you can retrieve the `Client` object for capturing exceptions/messages using [`get_client`](/reference/api-reference.md#api-get-client). + diff --git a/docs/reference/configuration.md b/docs/reference/configuration.md new file mode 100644 index 000000000..cdc0c744e --- /dev/null +++ b/docs/reference/configuration.md @@ -0,0 +1,1089 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/apm/agent/python/current/configuration.html +applies_to: + stack: + serverless: + observability: + product: + apm_agent_python: ga +--- + +# Configuration [configuration] + +To adapt the Elastic APM agent to your needs, configure it using environment variables or framework-specific configuration. + +You can either configure the agent by setting environment variables: + +```bash +ELASTIC_APM_SERVICE_NAME=foo python manage.py runserver +``` + +or with inline configuration: + +```python +apm_client = Client(service_name="foo") +``` + +or by using framework specific configuration e.g. in your Django `settings.py` file: + +```python +ELASTIC_APM = { + "SERVICE_NAME": "foo", +} +``` + +The precedence is as follows: + +* [Central configuration](#config-central_config) (supported options are marked with [![dynamic config](images/dynamic-config.svg "") ](#dynamic-configuration)) +* Environment variables +* Inline configuration +* Framework-specific configuration +* Default value + + +## Dynamic configuration [dynamic-configuration] + +Configuration options marked with the ![dynamic config](images/dynamic-config.svg "") badge can be changed at runtime when set from a supported source. 
+ +The Python Agent supports [Central configuration](docs-content://solutions/observability/apm/apm-agent-central-configuration.md), which allows you to fine-tune certain configurations from in the APM app. This feature is enabled in the Agent by default with [`central_config`](#config-central_config). + + +## Django [django-configuration] + +To configure Django, add an `ELASTIC_APM` dictionary to your `settings.py`: + +```python +ELASTIC_APM = { + 'SERVICE_NAME': 'my-app', + 'SECRET_TOKEN': 'changeme', +} +``` + + +## Flask [flask-configuration] + +To configure Flask, add an `ELASTIC_APM` dictionary to your `app.config`: + +```python +app.config['ELASTIC_APM'] = { + 'SERVICE_NAME': 'my-app', + 'SECRET_TOKEN': 'changeme', +} + +apm = ElasticAPM(app) +``` + + +## Core options [core-options] + + +### `service_name` [config-service-name] + +| Environment | Django/Flask | Default | Example | +| --- | --- | --- | --- | +| `ELASTIC_APM_SERVICE_NAME` | `SERVICE_NAME` | `unknown-python-service` | `my-app` | + +The name of your service. This is used to keep all the errors and transactions of your service together and is the primary filter in the Elastic APM user interface. + +While a default is provided, it is essential that you override this default with something more descriptive and unique across your infrastructure. + +::::{note} +The service name must conform to this regular expression: `^[a-zA-Z0-9 _-]+$`. In other words, the service name must only contain characters from the ASCII alphabet, numbers, dashes, underscores, and spaces. It cannot be an empty string or whitespace-only. +:::: + + + +### `server_url` [config-server-url] + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_SERVER_URL` | `SERVER_URL` | `'http://127.0.0.1:8200'` | + +The URL for your APM Server. The URL must be fully qualified, including protocol (`http` or `https`) and port. Note: Do not set this if you are using APM in an AWS lambda function. 
APM Agents are designed to proxy their calls to the APM Server through the lambda extension. Instead, set `ELASTIC_APM_LAMBDA_APM_SERVER`. For more info, see [AWS Lambda](lambda-support.md). + + +## `enabled` [config-enabled] + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_ENABLED` | `ENABLED` | `true` | + +Enable or disable the agent. When set to false, the agent will not collect any data or start any background threads. + + +## `recording` [config-recording] + +[![dynamic config](images/dynamic-config.svg "") ](#dynamic-configuration) + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_RECORDING` | `RECORDING` | `true` | + +Enable or disable recording of events. If set to false, then the Python agent does not send any events to the Elastic APM server, and instrumentation overhead is minimized. The agent will continue to poll the server for configuration changes. + + +## Logging Options [logging-options] + + +### `log_level` [config-log_level] + +[![dynamic config](images/dynamic-config.svg "") ](#dynamic-configuration) + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_LOG_LEVEL` | `LOG_LEVEL` | | + +The `logging.logLevel` at which the `elasticapm` logger will log. The available options are: + +* `"off"` (sets `logging.logLevel` to 1000) +* `"critical"` +* `"error"` +* `"warning"` +* `"info"` +* `"debug"` +* `"trace"` (sets `logging.log_level` to 5) + +Options are case-insensitive + +Note that this option doesn’t do anything with logging handlers. In order for any logs to be visible, you must either configure a handler ([`logging.basicConfig`](https://docs.python.org/3/library/logging.html#logging.basicConfig) will do this for you) or set [`log_file`](#config-log_file). This will also override any log level your app has set for the `elasticapm` logger. 
+ + +### `log_file` [config-log_file] + +| Environment | Django/Flask | Default | Example | +| --- | --- | --- | --- | +| `ELASTIC_APM_LOG_FILE` | `LOG_FILE` | `""` | `"/var/log/elasticapm/log.txt"` | + +This enables the agent to log to a file. This is disabled by default. The agent will log at the `logging.logLevel` configured with [`log_level`](#config-log_level). Use [`log_file_size`](#config-log_file_size) to configure the maximum size of the log file. This log file will automatically rotate. + +Note that setting [`log_level`](#config-log_level) is required for this setting to do anything. + +If [`ecs_logging`](https://github.com/elastic/ecs-logging-python) is installed, the logs will automatically be formatted as ecs-compatible json. + + +### `log_file_size` [config-log_file_size] + +| Environment | Django/Flask | Default | Example | +| --- | --- | --- | --- | +| `ELASTIC_APM_LOG_FILE_SIZE` | `LOG_FILE_SIZE` | `"50mb"` | `"100mb"` | + +The size of the log file if [`log_file`](#config-log_file) is set. + +The agent always keeps one backup file when rotating, so the maximum space that the log files will consume is twice the value of this setting. + + +### `log_ecs_reformatting` [config-log_ecs_reformatting] + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_LOG_ECS_REFORMATTING` | `LOG_ECS_REFORMATTING` | `"off"` | + +::::{warning} +This functionality is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features. +:::: + + +Valid options: + +* `"off"` +* `"override"` + +If [`ecs_logging`](https://github.com/elastic/ecs-logging-python) is installed, setting this to `"override"` will cause the agent to automatically attempt to enable ecs-formatted logging. 
+ +For base `logging` from the standard library, the agent will get the root logger, find any attached handlers, and for each, set the formatter to `ecs_logging.StdlibFormatter()`. + +If `structlog` is installed, the agent will override any configured processors with `ecs_logging.StructlogFormatter()`. + +Note that this is a very blunt instrument that could have unintended side effects. If problems arise, please apply these formatters manually and leave this setting as `"off"`. See the [`ecs_logging` docs](ecs-logging-python://reference/installation.md) for more information about using these formatters. + +Also note that this setting does not facilitate shipping logs to Elasticsearch. We recommend [Filebeat](https://www.elastic.co/beats/filebeat) for that purpose. + + +## Other options [other-options] + + +### `transport_class` [config-transport-class] + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_TRANSPORT_CLASS` | `TRANSPORT_CLASS` | `elasticapm.transport.http.Transport` | + +The transport class to use when sending events to the APM Server. + + +### `service_node_name` [config-service-node-name] + +| Environment | Django/Flask | Default | Example | +| --- | --- | --- | --- | +| `ELASTIC_APM_SERVICE_NODE_NAME` | `SERVICE_NODE_NAME` | `None` | `"redis1"` | + +The name of the given service node. This is optional and if omitted, the APM Server will fall back on `system.container.id` if available, and `host.name` if necessary. + +This option allows you to set the node name manually to ensure it is unique and meaningful. + + +### `environment` [config-environment] + +| Environment | Django/Flask | Default | Example | +| --- | --- | --- | --- | +| `ELASTIC_APM_ENVIRONMENT` | `ENVIRONMENT` | `None` | `"production"` | + +The name of the environment this service is deployed in, e.g. "production" or "staging". + +Environments allow you to easily filter data on a global level in the APM app. 
It’s important to be consistent when naming environments across agents. See [environment selector](docs-content://solutions/observability/apm/filter-data.md#apm-filter-your-data-service-environment-filter) in the APM app for more information. + +::::{note} +This feature is fully supported in the APM app in Kibana versions >= 7.2. You must use the query bar to filter for a specific environment in versions prior to 7.2. +:::: + + + +### `cloud_provider` [config-cloud-provider] + +| Environment | Django/Flask | Default | Example | +| --- | --- | --- | --- | +| `ELASTIC_APM_CLOUD_PROVIDER` | `CLOUD_PROVIDER` | `"auto"` | `"aws"` | + +This config value allows you to specify which cloud provider should be assumed for metadata collection. By default, the agent will attempt to detect the cloud provider or, if that fails, will use trial and error to collect the metadata. + +Valid options are `"auto"`, `"aws"`, `"gcp"`, and `"azure"`. If this config value is set to `"none"`, then no cloud metadata will be collected. + + +### `secret_token` [config-secret-token] + +| Environment | Django/Flask | Default | Example | +| --- | --- | --- | --- | +| `ELASTIC_APM_SECRET_TOKEN` | `SECRET_TOKEN` | `None` | A random string | + +This string is used to ensure that only your agents can send data to your APM Server. Both the agents and the APM Server have to be configured with the same secret token. An example to generate a secure secret token is: + +```bash +python -c "import secrets; print(secrets.token_urlsafe(32))" +``` + +::::{warning} +Secret tokens only provide any security if your APM Server uses TLS. +:::: + + + +### `api_key` [config-api-key] + +| Environment | Django/Flask | Default | Example | +| --- | --- | --- | --- | +| `ELASTIC_APM_API_KEY` | `API_KEY` | `None` | A base64-encoded string | + +This base64-encoded string is used to ensure that only your agents can send data to your APM Server. 
The API key can be created in the [Applications UI](docs-content://solutions/observability/apm/api-keys.md#apm-create-an-api-key). + +::::{warning} +API keys only provide any real security if your APM Server uses TLS. +:::: + + + +### `service_version` [config-service-version] + +| Environment | Django/Flask | Default | Example | +| --- | --- | --- | --- | +| `ELASTIC_APM_SERVICE_VERSION` | `SERVICE_VERSION` | `None` | A string indicating the version of the deployed service | + +A version string for the currently deployed version of the service. If your deploys are not versioned, the recommended value for this field is the commit identifier of the deployed revision, e.g. the output of `git rev-parse HEAD`. + + +### `framework_name` [config-framework-name] + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_FRAMEWORK_NAME` | `FRAMEWORK_NAME` | Depending on framework | + +The name of the used framework. For Django and Flask, this defaults to `django` and `flask` respectively, otherwise, the default is `None`. + + +### `framework_version` [config-framework-version] + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_FRAMEWORK_VERSION` | `FRAMEWORK_VERSION` | Depending on framework | + +The version number of the used framework. For Django and Flask, this defaults to the used version of the framework, otherwise, the default is `None`. + + +### `filter_exception_types` [config-filter-exception-types] + +| Environment | Django/Flask | Default | Example | +| --- | --- | --- | --- | +| `ELASTIC_APM_FILTER_EXCEPTION_TYPES` | `FILTER_EXCEPTION_TYPES` | `[]` | `['OperationalError', 'mymodule.SomeoneElsesProblemError']` | +| multiple values separated by commas, without spaces | | | | + +A list of exception types to be filtered. Exceptions of these types will not be sent to the APM Server. 
+ + +### `transaction_ignore_urls` [config-transaction-ignore-urls] + +[![dynamic config](images/dynamic-config.svg "") ](#dynamic-configuration) + +| Environment | Django/Flask | Default | Example | +| --- | --- | --- | --- | +| `ELASTIC_APM_TRANSACTION_IGNORE_URLS` | `TRANSACTION_IGNORE_URLS` | `[]` | `['/api/ping', '/static/*']` | +| multiple values separated by commas, without spaces | | | | + +A list of URLs for which the agent should not capture any transaction data. + +Optionally, `*` can be used to match multiple URLs at once. + + +### `transactions_ignore_patterns` [config-transactions-ignore-patterns] + +| Environment | Django/Flask | Default | Example | +| --- | --- | --- | --- | +| `ELASTIC_APM_TRANSACTIONS_IGNORE_PATTERNS` | `TRANSACTIONS_IGNORE_PATTERNS` | `[]` | `['^OPTIONS ', 'myviews.Healthcheck']` | +| multiple values separated by commas, without spaces | | | | + +A list of regular expressions. Transactions with a name that matches any of the configured patterns will be ignored and not sent to the APM Server. + +::::{note} +As the name of the transaction can only be determined at the end of the transaction, the agent might still cause overhead for transactions ignored through this setting. If agent overhead is a concern, we recommend [`transaction_ignore_urls`](#config-transaction-ignore-urls) instead. +:::: + + + +### `server_timeout` [config-server-timeout] + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_SERVER_TIMEOUT` | `SERVER_TIMEOUT` | `"5s"` | + +A timeout for requests to the APM Server. The setting has to be provided in **[duration format](#config-format-duration)**. If a request to the APM Server takes longer than the configured timeout, the request is cancelled and the event (exception or transaction) is discarded. Set to `None` to disable timeouts. + +::::{warning} +If timeouts are disabled or set to a high value, your app could experience memory issues if the APM Server times out. 
+:::: + + + +### `hostname` [config-hostname] + +| Environment | Django/Flask | Default | Example | +| --- | --- | --- | --- | +| `ELASTIC_APM_HOSTNAME` | `HOSTNAME` | `socket.gethostname()` | `app-server01.example.com` | + +The host name to use when sending error and transaction data to the APM Server. + + +### `auto_log_stacks` [config-auto-log-stacks] + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_AUTO_LOG_STACKS` | `AUTO_LOG_STACKS` | `True` | +| set to `"true"` / `"false"` | | | + +If set to `True` (the default), the agent will add a stack trace to each log event, indicating where the log message has been issued. + +This setting can be overridden on an individual basis by setting the `extra`-key `stack`: + +```python +logger.info('something happened', extra={'stack': False}) +``` + + +### `collect_local_variables` [config-collect-local-variables] + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_COLLECT_LOCAL_VARIABLES` | `COLLECT_LOCAL_VARIABLES` | `errors` | + +Possible values: `errors`, `transactions`, `all`, `off` + +The Elastic APM Python agent can collect local variables for stack frames. By default, this is only done for errors. + +::::{note} +Collecting local variables has a non-trivial overhead. Collecting local variables for transactions in production environments can have adverse effects for the performance of your service. +:::: + + + +### `local_var_max_length` [config-local-var-max-length] + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_LOCAL_VAR_MAX_LENGTH` | `LOCAL_VAR_MAX_LENGTH` | `200` | + +When collecting local variables, they will be converted to strings. This setting allows you to limit the length of the resulting string. 
+ + +### `local_var_list_max_length` [config-local-list-var-max-length] + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_LOCAL_VAR_LIST_MAX_LENGTH` | `LOCAL_VAR_LIST_MAX_LENGTH` | `10` | + +This setting allows you to limit the length of lists in local variables. + + +### `local_var_dict_max_length` [config-local-dict-var-max-length] + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_LOCAL_VAR_DICT_MAX_LENGTH` | `LOCAL_VAR_DICT_MAX_LENGTH` | `10` | + +This setting allows you to limit the length of dicts in local variables. + + +### `source_lines_error_app_frames` [config-source-lines-error-app-frames] + + +### `source_lines_error_library_frames` [config-source-lines-error-library-frames] + + +### `source_lines_span_app_frames` [config-source-lines-span-app-frames] + + +### `source_lines_span_library_frames` [config-source-lines-span-library-frames] + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_SOURCE_LINES_ERROR_APP_FRAMES` | `SOURCE_LINES_ERROR_APP_FRAMES` | `5` | +| `ELASTIC_APM_SOURCE_LINES_ERROR_LIBRARY_FRAMES` | `SOURCE_LINES_ERROR_LIBRARY_FRAMES` | `5` | +| `ELASTIC_APM_SOURCE_LINES_SPAN_APP_FRAMES` | `SOURCE_LINES_SPAN_APP_FRAMES` | `0` | +| `ELASTIC_APM_SOURCE_LINES_SPAN_LIBRARY_FRAMES` | `SOURCE_LINES_SPAN_LIBRARY_FRAMES` | `0` | + +By default, the APM agent collects source code snippets for errors. This setting allows you to modify the number of lines of source code that are being collected. + +We differentiate between errors and spans, as well as library frames and app frames. + +::::{warning} +Especially for spans, collecting source code can have a large impact on storage use in your Elasticsearch cluster. 
+:::: + + + +### `capture_body` [config-capture-body] + +[![dynamic config](images/dynamic-config.svg "") ](#dynamic-configuration) + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_CAPTURE_BODY` | `CAPTURE_BODY` | `off` | + +For transactions that are HTTP requests, the Python agent can optionally capture the request body (e.g. `POST` variables). + +Possible values: `errors`, `transactions`, `all`, `off`. + +If the request has a body and this setting is disabled, the body will be shown as `[REDACTED]`. + +For requests with a content type of `multipart/form-data`, any uploaded files will be referenced in a special `_files` key. It contains the name of the field and the name of the uploaded file, if provided. + +::::{warning} +Request bodies often contain sensitive values like passwords and credit card numbers. If your service handles data like this, we advise to only enable this feature with care. +:::: + + + +### `capture_headers` [config-capture-headers] + +[![dynamic config](images/dynamic-config.svg "") ](#dynamic-configuration) + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_CAPTURE_HEADERS` | `CAPTURE_HEADERS` | `true` | + +For transactions and errors that happen due to HTTP requests, the Python agent can optionally capture the request and response headers. + +Possible values: `true`, `false` + +::::{warning} +Request headers often contain sensitive values like session IDs and cookies. See [sanitizing data](sanitizing-data.md) for more information on how to filter out sensitive data. +:::: + + + +### `transaction_max_spans` [config-transaction-max-spans] + +[![dynamic config](images/dynamic-config.svg "") ](#dynamic-configuration) + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_TRANSACTION_MAX_SPANS` | `TRANSACTION_MAX_SPANS` | `500` | + +This limits the amount of spans that are recorded per transaction. 
This is helpful in cases where a transaction creates a very high amount of spans (e.g. thousands of SQL queries). Setting an upper limit will prevent edge cases from overloading the agent and the APM Server. + + +### `stack_trace_limit` [config-stack-trace-limit] + +[![dynamic config](images/dynamic-config.svg "") ](#dynamic-configuration) + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_STACK_TRACE_LIMIT` | `STACK_TRACE_LIMIT` | `50` | + +This limits the number of frames captured for each stack trace. + +Setting the limit to `0` will disable stack trace collection, while any positive integer value will be used as the maximum number of frames to collect. To disable the limit and always capture all frames, set the value to `-1`. + + +### `span_stack_trace_min_duration` [config-span-stack-trace-min-duration] + +[![dynamic config](images/dynamic-config.svg "") ](#dynamic-configuration) + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_SPAN_STACK_TRACE_MIN_DURATION` | `SPAN_STACK_TRACE_MIN_DURATION` | `"5ms"` | + +By default, the APM agent collects a stack trace with every recorded span that has a duration equal to or longer than this configured threshold. While stack traces are very helpful to find the exact place in your code from which a span originates, collecting this stack trace does have some overhead. Tune this threshold to ensure that you only collect stack traces for spans that could be problematic. + +To collect traces for all spans, regardless of their length, set the value to `0`. + +To disable stack trace collection for spans completely, set the value to `-1`. + +Except for the special values `-1` and `0`, this setting should be provided in **[duration format](#config-format-duration)**. 
+ + +### `span_frames_min_duration` [config-span-frames-min-duration] + +[![dynamic config](images/dynamic-config.svg "") ](#dynamic-configuration) + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_SPAN_FRAMES_MIN_DURATION` | `SPAN_FRAMES_MIN_DURATION` | `"5ms"` | + +::::{note} +This config value is being deprecated. Use [`span_stack_trace_min_duration`](#config-span-stack-trace-min-duration) instead. +:::: + + + +### `span_compression_enabled` [config-span-compression-enabled] + +[![dynamic config](images/dynamic-config.svg "") ](#dynamic-configuration) + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_SPAN_COMPRESSION_ENABLED` | `SPAN_COMPRESSION_ENABLED` | `True` | + +Enable/disable span compression. + +If enabled, the agent will compress very short, repeated spans into a single span, which is beneficial for storage and processing requirements. Some information is lost in this process, e.g. exact durations of each compressed span. + + +### `span_compression_exact_match_max_duration` [config-span-compression-exact-match-max_duration] + +[![dynamic config](images/dynamic-config.svg "") ](#dynamic-configuration) + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_SPAN_COMPRESSION_EXACT_MATCH_MAX_DURATION` | `SPAN_COMPRESSION_EXACT_MATCH_MAX_DURATION` | `"50ms"` | + +Consecutive spans that are exact match and that are under this threshold will be compressed into a single composite span. This reduces the collection, processing, and storage overhead, and removes clutter from the UI. The tradeoff is that the DB statements of all the compressed spans will not be collected. + +Two spans are considered exact matches if the following attributes are identical: * span name * span type * span subtype * destination resource (e.g. 
the Database name) + + +### `span_compression_same_kind_max_duration` [config-span-compression-same-kind-max-duration] + +[![dynamic config](images/dynamic-config.svg "") ](#dynamic-configuration) + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_SPAN_COMPRESSION_SAME_KIND_MAX_DURATION` | `SPAN_COMPRESSION_SAME_KIND_MAX_DURATION` | `"0ms"` (disabled) | + +Consecutive spans to the same destination that are under this threshold will be compressed into a single composite span. This reduces the collection, processing, and storage overhead, and removes clutter from the UI. The tradeoff is that metadata such as database statements of all the compressed spans will not be collected. + +Two spans are considered to be of the same kind if the following attributes are identical: * span type * span subtype * destination resource (e.g. the Database name) + + +### `exit_span_min_duration` [config-exit-span-min-duration] + +[![dynamic config](images/dynamic-config.svg "") ](#dynamic-configuration) + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_EXIT_SPAN_MIN_DURATION` | `EXIT_SPAN_MIN_DURATION` | `"0ms"` | + +Exit spans are spans that represent a call to an external service, like a database. If such calls are very short, they are usually not relevant and can be ignored. + +This feature is disabled by default. + +::::{note} +if a span propagates distributed tracing IDs, it will not be ignored, even if it is shorter than the configured threshold. This is to ensure that no broken traces are recorded. +:::: + + + +### `api_request_size` [config-api-request-size] + +[![dynamic config](images/dynamic-config.svg "") ](#dynamic-configuration) + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_API_REQUEST_SIZE` | `API_REQUEST_SIZE` | `"768kb"` | + +The maximum queue length of the request buffer before sending the request to the APM Server. 
A lower value will increase the load on your APM Server, while a higher value can increase the memory pressure of your app. A higher value also impacts the time until data is indexed and searchable in Elasticsearch. + +This setting is useful to limit memory consumption if you experience a sudden spike of traffic. It has to be provided in **[size format](#config-format-size)**. + +::::{note} +Due to internal buffering of gzip, the actual request size can be a few kilobytes larger than the given limit. By default, the APM Server limits request payload size to `1 MByte`. +:::: + + + +### `api_request_time` [config-api-request-time] + +[![dynamic config](images/dynamic-config.svg "") ](#dynamic-configuration) + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_API_REQUEST_TIME` | `API_REQUEST_TIME` | `"10s"` | + +The maximum queue time of the request buffer before sending the request to the APM Server. A lower value will increase the load on your APM Server, while a higher value can increase the memory pressure of your app. A higher value also impacts the time until data is indexed and searchable in Elasticsearch. + +This setting is useful to limit memory consumption if you experience a sudden spike of traffic. It has to be provided in **[duration format](#config-format-duration)**. + +::::{note} +The actual time will vary between 90-110% of the given value, to avoid stampedes of instances that start at the same time. +:::: + + + +### `processors` [config-processors] + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_PROCESSORS` | `PROCESSORS` | `['elasticapm.processors.sanitize_stacktrace_locals', 'elasticapm.processors.sanitize_http_request_cookies', 'elasticapm.processors.sanitize_http_headers', 'elasticapm.processors.sanitize_http_wsgi_env', 'elasticapm.processors.sanitize_http_request_body']` | + +A list of processors to process transactions and errors. 
For more information, see [Sanitizing Data](sanitizing-data.md). + +::::{warning} +We recommend always including the default set of validators if you customize this setting. +:::: + + + +### `sanitize_field_names` [config-sanitize-field-names] + +[![dynamic config](images/dynamic-config.svg "") ](#dynamic-configuration) + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_SANITIZE_FIELD_NAMES` | `SANITIZE_FIELD_NAMES` | `["password", "passwd", "pwd", "secret", "*key", "*token*", "*session*", "*credit*", "*card*", "*auth*", "*principal*", "set-cookie"]` | + +A list of glob-matched field names to match and mask when using processors. For more information, see [Sanitizing Data](sanitizing-data.md). + +::::{warning} +We recommend always including the default set of field name matches if you customize this setting. +:::: + + + +### `transaction_sample_rate` [config-transaction-sample-rate] + +[![dynamic config](images/dynamic-config.svg "") ](#dynamic-configuration) + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_TRANSACTION_SAMPLE_RATE` | `TRANSACTION_SAMPLE_RATE` | `1.0` | + +By default, the agent samples every transaction (e.g. request to your service). To reduce overhead and storage requirements, set the sample rate to a value between `0.0` and `1.0`. We still record overall time and the result for unsampled transactions, but no context information, labels, or spans. + +::::{note} +This setting will be automatically rounded to 4 decimals of precision. +:::: + + + +### `include_paths` [config-include-paths] + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_INCLUDE_PATHS` | `INCLUDE_PATHS` | `[]` | +| multiple values separated by commas, without spaces | | | + +A set of paths, optionally using shell globs (see [`fnmatch`](https://docs.python.org/3/library/fnmatch.html) for a description of the syntax). 
These are matched against the absolute filename of every frame, and if a pattern matches, the frame is considered to be an "in-app frame". + +`include_paths` **takes precedence** over `exclude_paths`. + + +### `exclude_paths` [config-exclude-paths] + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_EXCLUDE_PATHS` | `EXCLUDE_PATHS` | Varies on Python version and implementation | +| multiple values separated by commas, without spaces | | | + +A set of paths, optionally using shell globs (see [`fnmatch`](https://docs.python.org/3/library/fnmatch.html) for a description of the syntax). These are matched against the absolute filename of every frame, and if a pattern matches, the frame is considered to be a "library frame". + +`include_paths` **takes precedence** over `exclude_paths`. + +The default value varies based on your Python version and implementation, e.g.: + +* PyPy3: `['\*/lib-python/3/*', '\*/site-packages/*']` +* CPython 2.7: `['\*/lib/python2.7/*', '\*/lib64/python2.7/*']` + + +### `debug` [config-debug] + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_DEBUG` | `DEBUG` | `False` | + +If your app is in debug mode (e.g. in Django with `settings.DEBUG = True` or in Flask with `app.debug = True`), the agent won’t send any data to the APM Server. You can override it by changing this setting to `True`. + + +### `disable_send` [config-disable-send] + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_DISABLE_SEND` | `DISABLE_SEND` | `False` | + +If set to `True`, the agent won’t send any events to the APM Server, independent of any debug state. + + +### `instrument` [config-instrument] + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_INSTRUMENT` | `INSTRUMENT` | `True` | + +If set to `False`, the agent won’t instrument any code. This disables most of the tracing functionality, but can be useful to debug possible instrumentation issues. 
+ + +### `verify_server_cert` [config-verify-server-cert] + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_VERIFY_SERVER_CERT` | `VERIFY_SERVER_CERT` | `True` | + +By default, the agent verifies the SSL certificate if an HTTPS connection to the APM Server is used. Verification can be disabled by changing this setting to `False`. This setting is ignored when [`server_cert`](#config-server-cert) is set. + + +### `server_cert` [config-server-cert] + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_SERVER_CERT` | `SERVER_CERT` | `None` | + +If you have configured your APM Server with a self-signed TLS certificate, or you just wish to pin the server certificate, you can specify the path to the PEM-encoded certificate via the `ELASTIC_APM_SERVER_CERT` configuration. + +::::{note} +If this option is set, the agent only verifies that the certificate provided by the APM Server is identical to the one configured here. Validity of the certificate is not checked. +:::: + + + +### `server_ca_cert_file` [config-server-ca-cert-file] + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_SERVER_CA_CERT_FILE` | `SERVER_CA_CERT_FILE` | `None` | + +By default, the agent will validate the TLS/SSL certificate of the APM Server using the well-known CAs curated by Mozilla, and provided by the [`certifi`](https://pypi.org/project/certifi/) package. + +You can set this option to the path of a file containing a CA certificate that will be used instead. + +Specifying this option is required when using self-signed certificates, unless server certificate validation is disabled. + + +### `use_certifi` [config-use-certifi] + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_USE_CERTIFI` | `USE_CERTIFI` | `True` | + +By default, the Python Agent uses the [`certifi`](https://pypi.org/project/certifi/) certificate store. 
To use Python’s default mechanism for finding certificates, set this option to `False`. + + +### `metrics_interval` [config-metrics_interval] + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_METRICS_INTERVAL` | `METRICS_INTERVAL` | `30s` | + +The interval in which the agent collects metrics. A shorter interval increases the granularity of metrics, but also increases the overhead of the agent, as well as storage requirements. + +It has to be provided in **[duration format](#config-format-duration)**. + + +### `disable_metrics` [config-disable_metrics] + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_DISABLE_METRICS` | `DISABLE_METRICS` | `None` | + +A comma-separated list of dotted metrics names that should not be sent to the APM Server. You can use `*` to match multiple metrics; for example, to disable all CPU-related metrics, as well as the "total system memory" metric, set `disable_metrics` to: + +``` +"*.cpu.*,system.memory.total" +``` +::::{note} +This setting only disables the **sending** of the given metrics, not collection. +:::: + + + +### `breakdown_metrics` [config-breakdown_metrics] + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_BREAKDOWN_METRICS` | `BREAKDOWN_METRICS` | `True` | + +Enable or disable the tracking and collection of breakdown metrics. Setting this to `False` disables the tracking of breakdown metrics, which can reduce the overhead of the agent. + +::::{note} +This feature requires APM Server and Kibana >= 7.3. +:::: + + + +### `prometheus_metrics` (Beta) [config-prometheus_metrics] + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_PROMETHEUS_METRICS` | `PROMETHEUS_METRICS` | `False` | + +Enable/disable the tracking and collection of metrics from `prometheus_client`. + +See [Prometheus metric set (beta)](metrics.md#prometheus-metricset) for more information. + +::::{note} +This feature is currently in beta status. 
+:::: + + + +### `prometheus_metrics_prefix` (Beta) [config-prometheus_metrics_prefix] + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_PROMETHEUS_METRICS_PREFIX` | `PROMETHEUS_METRICS_PREFIX` | `prometheus.metrics.` | + +A prefix to prepend to Prometheus metrics names. + +See [Prometheus metric set (beta)](metrics.md#prometheus-metricset) for more information. + +::::{note} +This feature is currently in beta status. +:::: + + + +### `metrics_sets` [config-metrics_sets] + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_METRICS_SETS` | `METRICS_SETS` | ["elasticapm.metrics.sets.cpu.CPUMetricSet"] | + +List of import paths for the MetricSets that should be used to collect metrics. + +See [Custom Metrics](metrics.md#custom-metrics) for more information. + + +### `central_config` [config-central_config] + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_CENTRAL_CONFIG` | `CENTRAL_CONFIG` | `True` | + +When enabled, the agent will make periodic requests to the APM Server to fetch updated configuration. + +See [Dynamic configuration](#dynamic-configuration) for more information. + +::::{note} +This feature requires APM Server and Kibana >= 7.3. +:::: + + + +### `global_labels` [config-global_labels] + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_GLOBAL_LABELS` | `GLOBAL_LABELS` | `None` | + +Labels added to all events, with the format `key=value[,key=value[,...]]`. Any labels set by application via the API will override global labels with the same keys. + +::::{note} +This feature requires APM Server >= 7.2. 
+:::: + + + +### `disable_log_record_factory` [config-generic-disable-log-record-factory] + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_DISABLE_LOG_RECORD_FACTORY` | `DISABLE_LOG_RECORD_FACTORY` | `False` | + +By default in python 3, the agent installs a [LogRecord factory](logs.md#logging) that automatically adds tracing fields to your log records. Disable this behavior by setting this to `True`. + + +### `use_elastic_traceparent_header` [config-use-elastic-traceparent-header] + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_USE_ELASTIC_TRACEPARENT_HEADER` | `USE_ELASTIC_TRACEPARENT_HEADER` | `True` | + +To enable [distributed tracing](docs-content://solutions/observability/apm/traces.md), the agent sets a number of HTTP headers to outgoing requests made with [instrumented HTTP libraries](supported-technologies.md#automatic-instrumentation-http). These headers (`traceparent` and `tracestate`) are defined in the [W3C Trace Context](https://www.w3.org/TR/trace-context-1/) specification. + +Additionally, when this setting is set to `True`, the agent will set `elasticapm-traceparent` for backwards compatibility. + + +### `trace_continuation_strategy` [config-trace-continuation-strategy] + +[![dynamic config](images/dynamic-config.svg "") ](#dynamic-configuration) + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_TRACE_CONTINUATION_STRATEGY` | `TRACE_CONTINUATION_STRATEGY` | `continue` | + +This option allows some control on how the APM agent handles W3C trace-context headers on incoming requests. By default, the `traceparent` and `tracestate` headers are used per W3C spec for distributed tracing. However, in certain cases it can be helpful to **not** use the incoming `traceparent` header. Some example use cases: + +* An Elastic-monitored service is receiving requests with `traceparent` headers from **unmonitored** services. 
+* An Elastic-monitored service is publicly exposed, and does not want tracing data (trace-ids, sampling decisions) to possibly be spoofed by user requests. + +Valid values are: + +* `'continue'`: The default behavior. An incoming `traceparent` value is used to continue the trace and determine the sampling decision. +* `'restart'`: Always ignores the `traceparent` header of incoming requests. A new trace-id will be generated and the sampling decision will be made based on [`transaction_sample_rate`](#config-transaction-sample-rate). A **span link** will be made to the incoming traceparent. +* `'restart_external'`: If an incoming request includes the `es` vendor flag in `tracestate`, then any *traceparent* will be considered internal and will be handled as described for `'continue'` above. Otherwise, any `'traceparent'` is considered external and will be handled as described for `'restart'` above. + +Starting with Elastic Observability 8.2, span links will be visible in trace views. + + +### `use_elastic_excepthook` [config-use-elastic-excepthook] + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_USE_ELASTIC_EXCEPTHOOK` | `USE_ELASTIC_EXCEPTHOOK` | `False` | + +If set to `True`, the agent will intercept the default `sys.excepthook`, which allows the agent to collect all uncaught exceptions. + + +### `include_process_args` [config-include-process-args] + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_INCLUDE_PROCESS_ARGS` | `INCLUDE_PROCESS_ARGS` | `False` | + +Whether each transaction should have the process arguments attached. Disabled by default to save disk space. 
+ + +### `skip_server_info` [config-skip-server-info] + +```{applies_to} +apm_agent_python: preview 6.25.0 +``` + +| Environment | Django/Flask | Default | +| --- | --- | --- | +| `ELASTIC_APM_SKIP_SERVER_INFO` | `SKIP_SERVER_INFO` | `False` | + +Whether we should skip the server info check to save some latency on constrained environments like AWS Lambda. Disabled by default. + +::::{warning} +This functionality is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features. +:::: + +::::{warning} +This requires sending data to an APM Server newer than 8.7.0 in order to work properly. +:::: + + +## Django-specific configuration [config-django-specific] + + +### `django_transaction_name_from_route` [config-django-transaction-name-from-route] + +| Environment | Django | Default | +| --- | --- | --- | +| `ELASTIC_APM_DJANGO_TRANSACTION_NAME_FROM_ROUTE` | `DJANGO_TRANSACTION_NAME_FROM_ROUTE` | `False` | + +By default, we use the function or class name of the view as the transaction name. Starting with Django 2.2, Django makes the route (e.g. `users//`) available on the `request.resolver_match` object. If you want to use the route instead of the view name as the transaction name, set this config option to `true`. + +::::{note} +in versions previous to Django 2.2, changing this setting will have no effect. +:::: + + + +### `django_autoinsert_middleware` [config-django-autoinsert-middleware] + +| Environment | Django | Default | +| --- | --- | --- | +| `ELASTIC_APM_DJANGO_AUTOINSERT_MIDDLEWARE` | `DJANGO_AUTOINSERT_MIDDLEWARE` | `True` | + +To trace Django requests, the agent uses a middleware, `elasticapm.contrib.django.middleware.TracingMiddleware`. By default, this middleware is inserted automatically as the first item in `settings.MIDDLEWARES`. To disable the automatic insertion of the middleware, change this setting to `False`. 
+ + +## Generic Environment variables [config-generic-environment] + +Some environment variables that are not specific to the APM agent can be used to configure the agent. + + +### `HTTP_PROXY` and `HTTPS_PROXY` [config-generic-http-proxy] + +By using `HTTP_PROXY` and `HTTPS_PROXY`, the agent can be instructed to use a proxy to connect to the APM Server. If both are set, `HTTPS_PROXY` takes precedence. + +::::{note} +The environment variables are case-insensitive. +:::: + + + +### `NO_PROXY` [config-generic-no-proxy] + +To instruct the agent to **not** use a proxy, you can use the `NO_PROXY` environment variable. You can either set it to a comma-separated list of hosts for which no proxy should be used (e.g. `localhost,example.com`) or use `*` to match any host. + +This is useful if `HTTP_PROXY` / `HTTPS_PROXY` is set for other reasons than agent / APM Server communication. + + +### `SSL_CERT_FILE` and `SSL_CERT_DIR` [config-ssl-cert-file] + +To tell the agent to use a different SSL certificate, you can use these environment variables. See also [OpenSSL docs](https://www.openssl.org/docs/manmaster/man7/openssl-env.html#SSL_CERT_DIR-SSL_CERT_FILE). + +Please note that these variables may apply to other SSL/TLS communication in your service, not just related to the APM agent. + +::::{note} +These environment variables only take effect if [`use_certifi`](#config-use-certifi) is set to `False`. +:::: + + + +## Configuration formats [config-formats] + +Some options require a unit, either duration or size. These need to be provided in a specific format. + + +### Duration format [config-format-duration] + +The *duration* format is used for options like timeouts. The unit is provided as a suffix directly after the number–without any separation by whitespace. 
+ +**Example**: `5ms` + +**Supported units** + +* `us` (microseconds) +* `ms` (milliseconds) +* `s` (seconds) +* `m` (minutes) + + +### Size format [config-format-size] + +The *size* format is used for options like maximum buffer sizes. The unit is provided as suffix directly after the number, without any separation by whitespace. + +**Example**: `10kb` + +**Supported units**: + +* `b` (bytes) +* `kb` (kilobytes) +* `mb` (megabytes) +* `gb` (gigabytes) + +::::{note} +We use the power-of-two sizing convention, e.g. `1 kilobyte == 1024 bytes` +:::: + + diff --git a/docs/reference/django-support.md b/docs/reference/django-support.md new file mode 100644 index 000000000..61e5991ae --- /dev/null +++ b/docs/reference/django-support.md @@ -0,0 +1,333 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/apm/agent/python/current/django-support.html +applies_to: + stack: + serverless: + observability: + product: + apm_agent_python: ga +--- + +# Django support [django-support] + +Getting Elastic APM set up for your Django project is easy, and there are various ways you can tweak it to fit to your needs. + + +## Installation [django-installation] + +Install the Elastic APM agent using pip: + +```bash +$ pip install elastic-apm +``` + +or add it to your project’s `requirements.txt` file. + +::::{note} +For apm-server 6.2+, make sure you use version 2.0 or higher of `elastic-apm`. +:::: + + +::::{note} +If you use Django with uwsgi, make sure to [enable threads](http://uwsgi-docs.readthedocs.org/en/latest/Options.html#enable-threads) (enabled by default since 2.0.27) and [py-call-uwsgi-fork-hooks](https://uwsgi-docs.readthedocs.io/en/latest/Options.html#py-call-uwsgi-fork-hooks). +:::: + + + +## Setup [django-setup] + +Set up the Elastic APM agent in Django with these two steps: + +1. Add `elasticapm.contrib.django` to `INSTALLED_APPS` in your settings: + +```python +INSTALLED_APPS = ( +    # ... +    'elasticapm.contrib.django', +) +``` + +1. 
Choose a service name, and set the secret token if needed. + +```python +ELASTIC_APM = { + 'SERVICE_NAME': '', + 'SECRET_TOKEN': '', +} +``` + +or as environment variables: + +```shell +ELASTIC_APM_SERVICE_NAME= +ELASTIC_APM_SECRET_TOKEN= +``` + +You now have basic error logging set up, and everything resulting in a 500 HTTP status code will be reported to the APM Server. + +You can find a list of all available settings in the [Configuration](/reference/configuration.md) page. + +::::{note} +The agent only captures and sends data if you have `DEBUG = False` in your settings. To force the agent to capture data in Django debug mode, set the [debug](/reference/configuration.md#config-debug) configuration option, e.g.: + +```python +ELASTIC_APM = { + 'SERVICE_NAME': '', + 'DEBUG': True, +} +``` + +:::: + + + +## Performance metrics [django-performance-metrics] + +In order to collect performance metrics, the agent automatically inserts a middleware at the top of your middleware list (`settings.MIDDLEWARE` in current versions of Django, `settings.MIDDLEWARE_CLASSES` in some older versions). To disable the automatic insertion of the middleware, see [django_autoinsert_middleware](/reference/configuration.md#config-django-autoinsert-middleware). + +::::{note} +For automatic insertion to work, your list of middlewares (`settings.MIDDLEWARE` or `settings.MIDDLEWARE_CLASSES`) must be of type `list` or `tuple`. +:::: + + +In addition to broad request metrics (what will appear in the APM app as transactions), the agent also collects fine grained metrics on template rendering, database queries, HTTP requests, etc. You can find more information on what we instrument in the [Automatic Instrumentation](/reference/supported-technologies.md#automatic-instrumentation) section. 
+ + +### Instrumenting custom Python code [django-instrumenting-custom-python-code] + +To gain further insights into the performance of your code, please see [instrumenting custom code](/reference/instrumenting-custom-code.md). + + +### Ignoring specific views [django-ignoring-specific-views] + +You can use the `TRANSACTIONS_IGNORE_PATTERNS` configuration option to ignore specific views. The list given should be a list of regular expressions which are matched against the transaction name as seen in the Elastic APM user interface: + +```python +ELASTIC_APM['TRANSACTIONS_IGNORE_PATTERNS'] = ['^OPTIONS ', 'views.api.v2'] +``` + +This example ignores any requests using the `OPTIONS` method and any requests containing `views.api.v2`. + + +### Using the route as transaction name [django-transaction-name-route] + +By default, we use the function or class name of the view as the transaction name. Starting with Django 2.2, Django makes the route (e.g. `users/<user_id>/`) available on the `request.resolver_match` object. If you want to use the route instead of the view name as the transaction name, you can set the [`django_transaction_name_from_route`](/reference/configuration.md#config-django-transaction-name-from-route) config option to `true`. + +```python +ELASTIC_APM['DJANGO_TRANSACTION_NAME_FROM_ROUTE'] = True +``` + +::::{note} +In versions previous to Django 2.2, changing this setting will have no effect. +:::: + + + +### Integrating with the RUM Agent [django-integrating-with-the-rum-agent] + +To correlate performance measurement in the browser with measurements in your Django app, you can help the RUM (Real User Monitoring) agent by configuring it with the Trace ID and Span ID of the backend request. We provide a handy template context processor which adds all the necessary bits into the context of your templates. + +To enable this feature, first add the `rum_tracing` context processor to your `TEMPLATES` setting. 
You most likely already have a list of `context_processors`, in which case you can simply append ours to the list. + +```python +TEMPLATES = [ + { + 'BACKEND': 'django.template.backends.django.DjangoTemplates', + 'OPTIONS': { + 'context_processors': [ + # ... + 'elasticapm.contrib.django.context_processors.rum_tracing', + ], + }, + }, +] +``` + +Then, update the call to initialize the RUM agent (which probably happens in your base template) like this: + +```javascript +elasticApm.init({ + serviceName: "my-frontend-service", + pageLoadTraceId: "{{ apm.trace_id }}", + pageLoadSpanId: "{{ apm.span_id }}", + pageLoadSampled: {{ apm.is_sampled_js }} +}) +``` + +See the [JavaScript RUM agent documentation](apm-agent-rum-js://reference/index.md) for more information. + + +## Enabling and disabling the agent [django-enabling-and-disabling-the-agent] + +The easiest way to disable the agent is to set Django’s `DEBUG` option to `True` in your development configuration. No errors or metrics will be logged to Elastic APM. + +However, if during debugging you would like to force logging of errors to Elastic APM, then you can set `DEBUG` to `True` inside of the Elastic APM configuration dictionary, like this: + +```python +ELASTIC_APM = { + # ... + 'DEBUG': True, +} +``` + + +## Integrating with Python logging [django-logging] + +To easily send Python `logging` messages as "error" objects to Elasticsearch, we provide a `LoggingHandler` which you can use in your logging setup. The log messages will be enriched with a stack trace, data from the request, and more. + +::::{note} +the intended use case for this handler is to send high priority log messages (e.g. log messages with level `ERROR`) to Elasticsearch. For normal log shipping, we recommend using [filebeat](beats://reference/filebeat/index.md). +:::: + + +If you are new to how the `logging` module works together with Django, read more [in the Django documentation](https://docs.djangoproject.com/en/2.1/topics/logging/). 
+ +An example of how your `LOGGING` setting could look: + +```python +LOGGING = { + 'version': 1, + 'disable_existing_loggers': True, + 'formatters': { + 'verbose': { + 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s' + }, + }, + 'handlers': { + 'elasticapm': { + 'level': 'WARNING', + 'class': 'elasticapm.contrib.django.handlers.LoggingHandler', + }, + 'console': { + 'level': 'DEBUG', + 'class': 'logging.StreamHandler', + 'formatter': 'verbose' + } + }, + 'loggers': { + 'django.db.backends': { + 'level': 'ERROR', + 'handlers': ['console'], + 'propagate': False, + }, + 'mysite': { + 'level': 'WARNING', + 'handlers': ['elasticapm'], + 'propagate': False, + }, + # Log errors from the Elastic APM module to the console (recommended) + 'elasticapm.errors': { + 'level': 'ERROR', + 'handlers': ['console'], + 'propagate': False, + }, + }, +} +``` + +With this configuration, logging can be done like this in any module in the `myapp` django app: + +You can now use the logger in any module in the `myapp` Django app, for instance `myapp/views.py`: + +```python +import logging +logger = logging.getLogger('mysite') + +try: + instance = MyModel.objects.get(pk=42) +except MyModel.DoesNotExist: + logger.error( + 'Could not find instance, doing something else', + exc_info=True + ) +``` + +Note that `exc_info=True` adds the exception information to the data that gets sent to Elastic APM. Without it, only the message is sent. 
+ + +### Extra data [django-extra-data] + +If you want to send more data than what you get with the agent by default, logging can be done like so: + +```python +import logging +logger = logging.getLogger('mysite') + +try: + instance = MyModel.objects.get(pk=42) +except MyModel.DoesNotExist: + logger.error( + 'There was some crazy error', + exc_info=True, + extra={ + 'datetime': str(datetime.now()), + } + ) +``` + + +## Celery integration [django-celery-integration] + +For a general guide on how to set up Django with Celery, head over to Celery’s [Django documentation](http://celery.readthedocs.org/en/latest/django/first-steps-with-django.html#django-first-steps). + +Elastic APM will automatically log errors from your celery tasks, record performance data and keep the trace.id when the task is launched from an already started Elastic transaction. + + +## Logging "HTTP 404 Not Found" errors [django-logging-http-404-not-found-errors] + +By default, Elastic APM does not log HTTP 404 errors. If you wish to log these errors, add `'elasticapm.contrib.django.middleware.Catch404Middleware'` to `MIDDLEWARE` in your settings: + +```python +MIDDLEWARE = ( + # ... + 'elasticapm.contrib.django.middleware.Catch404Middleware', + # ... +) +``` + +Note that this middleware respects Django’s [`IGNORABLE_404_URLS`](https://docs.djangoproject.com/en/1.11/ref/settings/#ignorable-404-urls) setting. + + +## Disable the agent during tests [django-disable-agent-during-tests] + +To prevent the agent from sending any data to the APM Server during tests, set the `ELASTIC_APM_DISABLE_SEND` environment variable to `true`, e.g.: + +```python +ELASTIC_APM_DISABLE_SEND=true python manage.py test +``` + + +## Troubleshooting [django-troubleshooting] + +Elastic APM comes with a Django command that helps troubleshooting your setup. 
To check your configuration, run + +```bash +python manage.py elasticapm check +``` + +To send a test exception using the current settings, run + +```bash +python manage.py elasticapm test +``` + +If the command succeeds in sending a test exception, it will print a success message: + +```bash +python manage.py elasticapm test + +Trying to send a test error using these settings: + +SERVICE_NAME: +SECRET_TOKEN: +SERVER: http://127.0.0.1:8200 + +Success! We tracked the error successfully! You should be able to see it in a few seconds. +``` + + +## Supported Django and Python versions [supported-django-and-python-versions] + +A list of supported [Django](/reference/supported-technologies.md#supported-django) and [Python](/reference/supported-technologies.md#supported-python) versions can be found on our [Supported Technologies](/reference/supported-technologies.md) page. + diff --git a/docs/reference/flask-support.md b/docs/reference/flask-support.md new file mode 100644 index 000000000..06f29712c --- /dev/null +++ b/docs/reference/flask-support.md @@ -0,0 +1,221 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/apm/agent/python/current/flask-support.html +applies_to: + stack: + serverless: + observability: + product: + apm_agent_python: ga +--- + +# Flask support [flask-support] + +Getting Elastic APM set up for your Flask project is easy, and there are various ways you can tweak it to fit to your needs. + + +## Installation [flask-installation] + +Install the Elastic APM agent using pip: + +```bash +$ pip install "elastic-apm[flask]" +``` + +or add `elastic-apm[flask]` to your project’s `requirements.txt` file. + +::::{note} +For apm-server 6.2+, make sure you use version 2.0 or higher of `elastic-apm`. 
+:::: + + +::::{note} +If you use Flask with uwsgi, make sure to [enable threads](http://uwsgi-docs.readthedocs.org/en/latest/Options.html#enable-threads) (enabled by default since 2.0.27) and [py-call-uwsgi-fork-hooks](https://uwsgi-docs.readthedocs.io/en/latest/Options.html#py-call-uwsgi-fork-hooks). +:::: + + +::::{note} +If you see an error log that mentions `psutil not found`, you can install `psutil` using `pip install psutil`, or add `psutil` to your `requirements.txt` file. +:::: + + + +## Setup [flask-setup] + +To set up the agent, you need to initialize it with appropriate settings. + +The settings are configured either via environment variables, the application’s settings, or as initialization arguments. + +You can find a list of all available settings in the [Configuration](/reference/configuration.md) page. + +To initialize the agent for your application using environment variables: + +```python +from elasticapm.contrib.flask import ElasticAPM + +app = Flask(__name__) + +apm = ElasticAPM(app) +``` + +To configure the agent using `ELASTIC_APM` in your application’s settings: + +```python +from elasticapm.contrib.flask import ElasticAPM + +app.config['ELASTIC_APM'] = { + 'SERVICE_NAME': '', + 'SECRET_TOKEN': '', +} +apm = ElasticAPM(app) +``` + +The final option is to initialize the agent with the settings as arguments: + +```python +from elasticapm.contrib.flask import ElasticAPM + +apm = ElasticAPM(app, service_name='', secret_token='') +``` + + +### Debug mode [flask-debug-mode] + +::::{note} +Please note that errors and transactions will only be sent to the APM Server if your app is **not** in [Flask debug mode](https://flask.palletsprojects.com/en/3.0.x/quickstart/#debug-mode). 
+:::: + + +To force the agent to send data while the app is in debug mode, set the value of `DEBUG` in the `ELASTIC_APM` dictionary to `True`: + +```python +app.config['ELASTIC_APM'] = { +    'SERVICE_NAME': '', +    'SECRET_TOKEN': '', +    'DEBUG': True +} +``` + + +### Building applications on the fly? [flask-building-applications-on-the-fly] + +You can use the agent’s `init_app` hook for adding the application on the fly: + +```python +from elasticapm.contrib.flask import ElasticAPM +apm = ElasticAPM() + +def create_app(): +    app = Flask(__name__) +    apm.init_app(app, service_name='', secret_token='') +    return app +``` + + +## Usage [flask-usage] + +Once you have configured the agent, it will automatically track transactions and capture uncaught exceptions within Flask. If you want to send additional events, a couple of shortcuts are provided on the ElasticAPM Flask middleware object by raising an exception or logging a generic message. + +Capture an arbitrary exception by calling `capture_exception`: + +```python +try: +    1 / 0 +except ZeroDivisionError: +    apm.capture_exception() +``` + +Log a generic message with `capture_message`: + +```python +apm.capture_message('hello, world!') +``` + + +## Shipping Logs to Elasticsearch [flask-logging] + +This feature has been deprecated and will be removed in a future version. + +Please see our [Logging](/reference/logs.md) documentation for other supported ways to ship logs to Elasticsearch. + +Note that you can always send exceptions and messages to the APM Server with [`capture_exception`](/reference/api-reference.md#client-api-capture-exception) and [`capture_message`](/reference/api-reference.md#client-api-capture-message). 
+ +```python +from elasticapm import get_client + +@app.route('/') +def bar(): +    try: +        1 / 0 +    except ZeroDivisionError: +        get_client().capture_exception() +``` + + +### Extra data [flask-extra-data] + +In addition to what the agents log by default, you can send extra information: + +```python +@app.route('/') +def bar(): +    try: +        1 / 0 +    except ZeroDivisionError: +        app.logger.error('Math is hard', +            exc_info=True, +            extra={ +                'good_at_math': False, +            } +        ) +``` + + +### Celery tasks [flask-celery-tasks] + +The Elastic APM agent will automatically send errors and performance data from your Celery tasks to the APM Server. + + +## Performance metrics [flask-performance-metrics] + +If you’ve followed the instructions above, the agent has already hooked into the right signals and should be reporting performance metrics. + + +### Ignoring specific routes [flask-ignoring-specific-views] + +You can use the [`TRANSACTIONS_IGNORE_PATTERNS`](/reference/configuration.md#config-transactions-ignore-patterns) configuration option to ignore specific routes. The list given should be a list of regular expressions which are matched against the transaction name: + +```python +app.config['ELASTIC_APM'] = { +    ... +    'TRANSACTIONS_IGNORE_PATTERNS': ['^OPTIONS ', '/api/'] +    ... +} +``` + +This would ignore any requests using the `OPTIONS` method and any requests containing `/api/`. + + +### Integrating with the RUM Agent [flask-integrating-with-the-rum-agent] + +To correlate performance measurement in the browser with measurements in your Flask app, you can help the RUM (Real User Monitoring) agent by configuring it with the Trace ID and Span ID of the backend request. We provide a handy template context processor which adds all the necessary bits into the context of your templates. + +The context processor is installed automatically when you initialize `ElasticAPM`. 
All that is left to do is to update the call to initialize the RUM agent (which probably happens in your base template) like this: + +```javascript +elasticApm.init({ +    serviceName: "my-frontend-service", +    pageLoadTraceId: "{{ apm["trace_id"] }}", +    pageLoadSpanId: "{{ apm["span_id"]() }}", +    pageLoadSampled: {{ apm["is_sampled_js"] }} +}) +``` + +See the [JavaScript RUM agent documentation](apm-agent-rum-js://reference/index.md) for more information. + + +## Supported Flask and Python versions [supported-flask-and-python-versions] + +A list of supported [Flask](/reference/supported-technologies.md#supported-flask) and [Python](/reference/supported-technologies.md#supported-python) versions can be found on our [Supported Technologies](/reference/supported-technologies.md) page. + diff --git a/docs/reference/how-agent-works.md b/docs/reference/how-agent-works.md new file mode 100644 index 000000000..f8876cb32 --- /dev/null +++ b/docs/reference/how-agent-works.md @@ -0,0 +1,58 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/apm/agent/python/current/how-the-agent-works.html +applies_to: + stack: + serverless: + observability: + product: + apm_agent_python: ga +--- + +# How the Agent works [how-the-agent-works] + +To gather APM events (called transactions and spans), errors and metrics, the Python agent instruments your application in a few different ways. These events are then sent to the APM Server. The APM Server converts them to a format suitable for Elasticsearch, and sends them to an Elasticsearch cluster. You can then use the APM app in Kibana to gain insight into latency issues and error culprits within your application. + +Broadly, we differentiate between three different approaches to collect the necessary data: framework integration, instrumentation, and background collection. 
+ + +## Framework integration [how-it-works-framework-integration] + +To collect data about incoming requests and background tasks, we integrate with frameworks like [Django](/reference/django-support.md), [Flask](/reference/flask-support.md) and Celery. Whenever possible, framework integrations make use of hooks and signals provided by the framework. Examples of this are: + +* `request_started`, `request_finished`, and `got_request_exception` signals from `django.core.signals` +* `request_started`, `request_finished`, and `got_request_exception` signals from `flask.signals` +* `task_prerun`, `task_postrun`, and `task_failure` signals from `celery.signals` + +Framework integrations require some limited code changes in your app. E.g. for Django, you need to add `elasticapm.contrib.django` to `INSTALLED_APPS`. + + +## What if you are not using a framework [how-it-works-no-framework] + +If you’re not using a supported framework, for example, a simple Python script, you can still leverage the agent’s [automatic instrumentation](/reference/supported-technologies.md#automatic-instrumentation). Check out our docs on [instrumenting custom code](/reference/instrumenting-custom-code.md). + + +## Instrumentation [how-it-works-instrumentation] + +To collect data from database drivers, HTTP libraries etc., we instrument certain functions and methods in these libraries. Our instrumentation wraps these callables and collects additional data, like + +* time spent in the call +* the executed query for database drivers +* the fetched URL for HTTP libraries + +We use a 3rd party library, [`wrapt`](https://github.com/GrahamDumpleton/wrapt), to wrap the callables. You can read more on how `wrapt` works in Graham Dumpleton’s excellent series of [blog posts](https://grahamdumpleton.me/posts/?search=wrapt). + +Instrumentations are set up automatically and do not require any code changes. 
See [Automatic Instrumentation](/reference/supported-technologies.md#automatic-instrumentation) to learn more about which libraries we support. + + +## Background collection [how-it-works-background-collection] + +In addition to APM and error data, the Python agent also collects system and application metrics in regular intervals. This collection happens in a background thread that is started by the agent. + +In addition to the metrics collection background thread, the agent starts two additional threads per process: + +* a thread to regularly fetch remote configuration from the APM Server +* a thread to process the collected data and send it to the APM Server via HTTP. + +Note that every process that instantiates the agent will have these three threads. This means that when you e.g. use gunicorn or uwsgi workers, each worker will have three threads started by the Python agent. + diff --git a/docs/reference/images/choose-a-layer.png b/docs/reference/images/choose-a-layer.png new file mode 100644 index 000000000..49cfd9917 Binary files /dev/null and b/docs/reference/images/choose-a-layer.png differ diff --git a/docs/reference/images/config-layer.png b/docs/reference/images/config-layer.png new file mode 100644 index 000000000..ec6c045d3 Binary files /dev/null and b/docs/reference/images/config-layer.png differ diff --git a/docs/images/dynamic-config.svg b/docs/reference/images/dynamic-config.svg similarity index 100% rename from docs/images/dynamic-config.svg rename to docs/reference/images/dynamic-config.svg diff --git a/docs/reference/images/python-lambda-env-vars.png b/docs/reference/images/python-lambda-env-vars.png new file mode 100644 index 000000000..cbf4a25d2 Binary files /dev/null and b/docs/reference/images/python-lambda-env-vars.png differ diff --git a/docs/reference/index.md b/docs/reference/index.md new file mode 100644 index 000000000..df1c5bf30 --- /dev/null +++ b/docs/reference/index.md @@ -0,0 +1,37 @@ +--- +mapped_pages: + - 
https://www.elastic.co/guide/en/apm/agent/python/current/getting-started.html + - https://www.elastic.co/guide/en/apm/agent/python/current/index.html +applies_to: + stack: + serverless: + observability: + product: + apm_agent_python: ga +--- + +# APM Python agent [getting-started] + +The Elastic APM Python agent sends performance metrics and error logs to the APM Server. It has built-in support for Django and Flask performance metrics and error logging, as well as generic support of other WSGI frameworks for error logging. + + +## How does the Agent work? [how-it-works] + +The Python Agent instruments your application to collect APM events in a few different ways: + +To collect data about incoming requests and background tasks, the Agent integrates with [supported technologies](/reference/supported-technologies.md) to make use of hooks and signals provided by the framework. These framework integrations require limited code changes in your application. + +To collect data from database drivers, HTTP libraries etc., we instrument certain functions and methods in these libraries. Instrumentations are set up automatically and do not require any code changes. + +In addition to APM and error data, the Python agent also collects system and application metrics in regular intervals. This collection happens in a background thread that is started by the agent. + +More detailed information on how the Agent works can be found in the [advanced topics](/reference/how-agent-works.md). + + +## Additional components [additional-components] + +APM Agents work in conjunction with the [APM Server](docs-content://solutions/observability/apm/index.md), [Elasticsearch](docs-content://get-started/introduction.md#what-is-es), and [Kibana](docs-content://get-started/introduction.md#what-is-kib). 
The [APM documentation](docs-content://solutions/observability/apm/index.md) provides details on how these components work together, and provides a matrix outlining [Agent and Server compatibility](docs-content://solutions/observability/apm/apm-agent-compatibility.md). + +## Troubleshooting + +If you're experiencing issues with the APM Python agent, refer to [Troubleshoot APM Python Agent](docs-content://troubleshoot/observability/apm-agent-python/apm-python-agent.md). \ No newline at end of file diff --git a/docs/reference/instrumenting-custom-code.md b/docs/reference/instrumenting-custom-code.md new file mode 100644 index 000000000..43b7ff14b --- /dev/null +++ b/docs/reference/instrumenting-custom-code.md @@ -0,0 +1,124 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/apm/agent/python/current/instrumenting-custom-code.html +applies_to: + stack: + serverless: + observability: + product: + apm_agent_python: ga +--- + +# Instrumenting custom code [instrumenting-custom-code] + + +## Creating Additional Spans in a Transaction [instrumenting-custom-code-spans] + +Elastic APM instruments a variety of libraries out of the box, but sometimes you need to know how long a specific function took or how often it gets called. + +Assuming you’re using one of our [supported frameworks](/reference/set-up-apm-python-agent.md), you can apply the `@elasticapm.capture_span()` decorator to achieve exactly that. If you’re not using a supported framework, see [Creating New Transactions](#instrumenting-custom-code-transactions). + +`elasticapm.capture_span` can be used either as a decorator or as a context manager. 
The following example uses it both ways: + +```python +import elasticapm + +@elasticapm.capture_span() +def coffee_maker(strength): + fetch_water() + + with elasticapm.capture_span('near-to-machine'): + insert_filter() + for i in range(strength): + pour_coffee() + + start_drip() + + fresh_pots() +``` + +Similarly, you can use `elasticapm.async_capture_span` for instrumenting `async` workloads: + +```python +import elasticapm + +@elasticapm.async_capture_span() +async def coffee_maker(strength): + await fetch_water() + + async with elasticapm.async_capture_span('near-to-machine'): + await insert_filter() + async for i in range(strength): + await pour_coffee() + + start_drip() + + fresh_pots() +``` + +::::{note} +`asyncio` support is only available in Python 3.7+. +:::: + + +See [the API docs](/reference/api-reference.md#api-capture-span) for more information on `capture_span`. + + +## Creating New Transactions [instrumenting-custom-code-transactions] + +It’s important to note that `elasticapm.capture_span` only works if there is an existing transaction. If you’re not using one of our [supported frameworks](/reference/set-up-apm-python-agent.md), you need to create a `Client` object and begin and end the transactions yourself. You can even utilize the agent’s [automatic instrumentation](/reference/supported-technologies.md#automatic-instrumentation)! + +To collect the spans generated by the supported libraries, you need to invoke `elasticapm.instrument()` (just once, at the initialization stage of your application) and create at least one transaction. It is up to you to determine what you consider a transaction within your application — it can be the whole execution of the script or a part of it. + +The example below will consider the whole execution as a single transaction with two HTTP request spans in it. The config for `elasticapm.Client` can be passed in programmatically, and it will also utilize any config environment variables available to it automatically. 
+ +```python +import requests +import time +import elasticapm + +def main(): + sess = requests.Session() + for url in [ 'https://www.elastic.co', 'https://benchmarks.elastic.co' ]: + resp = sess.get(url) + time.sleep(1) + +if __name__ == '__main__': + client = elasticapm.Client(service_name="foo", server_url="https://example.com:8200") + elasticapm.instrument() # Only call this once, as early as possible. + client.begin_transaction(transaction_type="script") + main() + client.end_transaction(name=__name__, result="success") +``` + +Note that you don’t need to do anything to send the data — the `Client` object will handle that before the script exits. Additionally, the `Client` object should be treated as a singleton — you should only create one instance and store/pass around that instance for all transaction handling. + + +## Distributed Tracing [instrumenting-custom-code-distributed-tracing] + +When instrumenting custom code across multiple services, you should propagate the TraceParent where possible. This allows Elastic APM to bundle the various transactions into a single distributed trace. The Python Agent will automatically add TraceParent information to the headers of outgoing HTTP requests, which can then be used on the receiving end to add that TraceParent information to new manually-created transactions. + +Additionally, the Python Agent provides utilities for propagating the TraceParent in string format. 
+ +```python +import elasticapm + +client = elasticapm.Client(service_name="foo", server_url="https://example.com:8200") + +# Retrieve the current TraceParent as a string, requires active transaction +traceparent_string = elasticapm.get_trace_parent_header() + +# Create a TraceParent object from a string and use it for a new transaction +parent = elasticapm.trace_parent_from_string(traceparent_string) +client.begin_transaction(transaction_type="script", trace_parent=parent) +# Do some work +client.end_transaction(name=__name__, result="success") + +# Create a TraceParent object from a dictionary of headers, provided +# automatically by the sending service if it is using an Elastic APM Agent. +parent = elasticapm.trace_parent_from_headers(headers_dict) +client.begin_transaction(transaction_type="script", trace_parent=parent) +# Do some work +client.end_transaction(name=__name__, result="success") +``` + diff --git a/docs/reference/lambda-support.md b/docs/reference/lambda-support.md new file mode 100644 index 000000000..12f9922e3 --- /dev/null +++ b/docs/reference/lambda-support.md @@ -0,0 +1,270 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/apm/agent/python/current/lambda-support.html +applies_to: + stack: + serverless: + observability: + product: + apm_agent_python: ga +sub: + apm-lambda-ext-v: ver-1-6-0 + apm-python-v: ver-6-25-0 + apm-python-layer-v: 1 +--- + +# Monitoring AWS Lambda Python Functions [lambda-support] + +The Python APM Agent can be used with AWS Lambda to monitor the execution of your AWS Lambda functions. + +:::{note} +The Centralized Agent Configuration on the Elasticsearch APM currently does NOT support AWS Lambda. +::: + + +## Prerequisites [_prerequisites] + +You need an APM Server to send APM data to. Follow the [APM Quick start](docs-content://solutions/observability/apm/get-started-fleet-managed-apm-server.md) if you have not set one up yet. 
For the best-possible performance, we recommend setting up APM on {{ecloud}} in the same AWS region as your AWS Lambda functions. + +## Step 1: Add the APM Layers to your Lambda function [add_the_apm_layers_to_your_lambda_function] + +Both the [{{apm-lambda-ext}}](apm-aws-lambda://reference/index.md) and the Python APM Agent are added to your Lambda function as [AWS Lambda Layers](https://docs.aws.amazon.com/lambda/latest/dg/invocation-layers.html). Therefore, you need to add the corresponding Layer ARNs (identifiers) to your Lambda function. + +:::::::{tab-set} + +::::::{tab-item} AWS Web Console +To add the layers to your Lambda function through the AWS Management Console: + +1. Navigate to your function in the AWS Management Console +2. Scroll to the Layers section and click the *Add a layer* button ![image of layer configuration section in AWS Console](images/config-layer.png "") +3. Choose the *Specify an ARN* radio button +4. Copy and paste the following ARNs of the {{apm-lambda-ext}} layer and the APM agent layer in the *Specify an ARN* text input: + * APM Extension layer: + ```bash subs=true + arn:aws:lambda:{AWS_REGION}:267093732750:layer:elastic-apm-extension-{{apm-lambda-ext-v}}-{ARCHITECTURE}:1 <1> + ``` + 1. Replace `{AWS_REGION}` with the AWS region of your Lambda function and `{ARCHITECTURE}` with its architecture. + + * APM agent layer: + ```bash subs=true + arn:aws:lambda:{AWS_REGION}:267093732750:layer:elastic-apm-python-{{apm-python-v}}:{{apm-python-layer-v}} <1> + ``` + 1. Replace `{AWS_REGION}` with the AWS region of your Lambda function. + + ![image of choosing a layer in AWS Console](images/choose-a-layer.png "") +5. 
Click the *Add* button +:::::: + +::::::{tab-item} AWS CLI +To add the Layer ARNs of the {{apm-lambda-ext}} and the APM agent through the AWS command line interface execute the following command: + +```bash subs=true +aws lambda update-function-configuration --function-name yourLambdaFunctionName \ +--layers arn:aws:lambda:{AWS_REGION}:267093732750:layer:elastic-apm-extension-{{apm-lambda-ext-v}}-{ARCHITECTURE}:1 \ <1> +arn:aws:lambda:{AWS_REGION}:267093732750:layer:elastic-apm-python-{{apm-python-v}}:{{apm-python-layer-v}} <2> +``` +1. Replace `{AWS_REGION}` with the AWS region of your Lambda function and `{ARCHITECTURE}` with its architecture. +2. Replace `{AWS_REGION}` with the AWS region of your Lambda function. +:::::: + +::::::{tab-item} SAM +In your SAM `template.yml` file add the Layer ARNs of the {{apm-lambda-ext}} and the APM agent as follows: + +```yaml subs=true +... +Resources: + yourLambdaFunction: + Type: AWS::Serverless::Function + Properties: + ... + Layers: + - arn:aws:lambda:{AWS_REGION}:267093732750:layer:elastic-apm-extension-{{apm-lambda-ext-v}}-{ARCHITECTURE}:1 <1> + - arn:aws:lambda:{AWS_REGION}:267093732750:layer:elastic-apm-python-{{apm-python-v}}:{{apm-python-layer-v}} <2> +... +``` +1. Replace `{AWS_REGION}` with the AWS region of your Lambda function and `{ARCHITECTURE}` with its architecture. +2. Replace `{AWS_REGION}` with the AWS region of your Lambda function. +:::::: + +::::::{tab-item} Serverless +In your `serverless.yml` file add the Layer ARNs of the {{apm-lambda-ext}} and the APM agent to your function as follows: + +```yaml subs=true +... +functions: + yourLambdaFunction: + handler: ... + layers: + - arn:aws:lambda:{AWS_REGION}:267093732750:layer:elastic-apm-extension-{{apm-lambda-ext-v}}-{ARCHITECTURE}:1 <1> + - arn:aws:lambda:{AWS_REGION}:267093732750:layer:elastic-apm-python-{{apm-python-v}}:{{apm-python-layer-v}} <2> +... +``` +1. 
Replace `{AWS_REGION}` with the AWS region of your Lambda function and `{ARCHITECTURE}` with its architecture. +2. Replace `{AWS_REGION}` with the AWS region of your Lambda function. +:::::: + +::::::{tab-item} Terraform +To add the{{apm-lambda-ext}} and the APM agent to your function add the ARNs to the `layers` property in your Terraform file: + +```yaml subs=true +... +resource "aws_lambda_function" "your_lambda_function" { + ... + layers = ["arn:aws:lambda:{AWS_REGION}:267093732750:layer:elastic-apm-extension-{{apm-lambda-ext-v}}-{ARCHITECTURE}:1", "arn:aws:lambda:{AWS_REGION}:267093732750:layer:elastic-apm-python-{{apm-python-v}}:{{apm-python-layer-v}}"] <1> +} +... +``` +1. Replace `{AWS_REGION}` with the AWS region of your Lambda function and `{ARCHITECTURE}` with its architecture. +:::::: + +::::::{tab-item} Container Image +To add the {{apm-lambda-ext}} and the APM agent to your container-based function extend the Dockerfile of your function image as follows: + +```Dockerfile +FROM docker.elastic.co/observability/apm-lambda-extension-{IMAGE_ARCH}:latest AS lambda-extension <1> +FROM docker.elastic.co/observability/apm-agent-python:latest AS python-agent + +# FROM ... <-- this is the base image of your Lambda function + +COPY --from=lambda-extension /opt/elastic-apm-extension /opt/extensions/elastic-apm-extension +COPY --from=python-agent /opt/python/ /opt/python/ + +# ... +``` +1. Replace `{IMAGE_ARCH}` with the architecture of the image. +:::::: + +::::::: + +## Step 2: Configure APM on AWS Lambda [configure_apm_on_aws_lambda] + +The {{apm-lambda-ext}} and the APM Python agent are configured through environment variables on the AWS Lambda function. + +For the minimal configuration, you will need the *APM Server URL* to set the destination for APM data and an [APM Secret Token](docs-content://solutions/observability/apm/secret-token.md). 
If you prefer to use an [APM API key](docs-content://solutions/observability/apm/api-keys.md) instead of the APM secret token, use the `ELASTIC_APM_API_KEY` environment variable instead of `ELASTIC_APM_SECRET_TOKEN` in the following configuration. + +For production environments, we recommend [using the AWS Secrets Manager to store your APM authentication key](apm-aws-lambda://reference/aws-lambda-secrets-manager.md) instead of providing the secret value as plaintext in the environment variables. + +:::::::{tab-set} + +::::::{tab-item} AWS Web Console +To configure APM through the AWS Management Console: + +1. Navigate to your function in the AWS Management Console +2. Click on the *Configuration* tab +3. Click on *Environment variables* +4. Add the following required variables: + +```bash +AWS_LAMBDA_EXEC_WRAPPER = /opt/python/bin/elasticapm-lambda <1> +ELASTIC_APM_LAMBDA_APM_SERVER = <2> +ELASTIC_APM_SECRET_TOKEN = <3> +ELASTIC_APM_SEND_STRATEGY = background <4> +``` + +1. Use this exact fixed value. +2. This is your APM Server URL. +3. This is your APM secret token. +4. The [ELASTIC_APM_SEND_STRATEGY](apm-aws-lambda://reference/aws-lambda-config-options.md#_elastic_apm_send_strategy) defines when APM data is sent to your Elastic APM backend. To reduce the execution time of your lambda functions, we recommend to use the background strategy in production environments with steady load scenarios. + +![Python environment variables configuration section in AWS Console](images/python-lambda-env-vars.png "") +:::::: + +::::::{tab-item} AWS CLI +To configure APM through the AWS command line interface execute the following command: + +```bash +aws lambda update-function-configuration --function-name yourLambdaFunctionName \ + --environment "Variables={AWS_LAMBDA_EXEC_WRAPPER=/opt/python/bin/elasticapm-lambda,ELASTIC_APM_LAMBDA_APM_SERVER=,ELASTIC_APM_SECRET_TOKEN=,ELASTIC_APM_SEND_STRATEGY=background}" <1> +``` +1. 
The [ELASTIC_APM_SEND_STRATEGY](apm-aws-lambda://reference/aws-lambda-config-options.md#_elastic_apm_send_strategy) defines when APM data is sent to your Elastic APM backend. To reduce the execution time of your lambda functions, we recommend to use the background strategy in production environments with steady load scenarios. +:::::: + +::::::{tab-item} SAM +In your SAM `template.yml` file configure the following environment variables: + +```yaml +... +Resources: + yourLambdaFunction: + Type: AWS::Serverless::Function + Properties: + ... + Environment: + Variables: + AWS_LAMBDA_EXEC_WRAPPER: /opt/python/bin/elasticapm-lambda + ELASTIC_APM_LAMBDA_APM_SERVER: + ELASTIC_APM_SECRET_TOKEN: + ELASTIC_APM_SEND_STRATEGY: background <1> +... +``` + +1. The [ELASTIC_APM_SEND_STRATEGY](apm-aws-lambda://reference/aws-lambda-config-options.md#_elastic_apm_send_strategy) defines when APM data is sent to your Elastic APM backend. To reduce the execution time of your lambda functions, we recommend to use the background strategy in production environments with steady load scenarios. + +:::::: + +::::::{tab-item} Serverless +In your `serverless.yml` file configure the following environment variables: + +```yaml +... +functions: + yourLambdaFunction: + ... + environment: + AWS_LAMBDA_EXEC_WRAPPER: /opt/python/bin/elasticapm-lambda + ELASTIC_APM_LAMBDA_APM_SERVER: + ELASTIC_APM_SECRET_TOKEN: + ELASTIC_APM_SEND_STRATEGY: background <1> +... +``` + +1. The [ELASTIC_APM_SEND_STRATEGY](apm-aws-lambda://reference/aws-lambda-config-options.md#_elastic_apm_send_strategy) defines when APM data is sent to your Elastic APM backend. To reduce the execution time of your lambda functions, we recommend to use the background strategy in production environments with steady load scenarios. + +:::::: + +::::::{tab-item} Terraform +In your Terraform file configure the following environment variables: + +```yaml +... +resource "aws_lambda_function" "your_lambda_function" { + ... 
+ environment { + variables = { + AWS_LAMBDA_EXEC_WRAPPER = /opt/python/bin/elasticapm-lambda + ELASTIC_APM_LAMBDA_APM_SERVER = "" + ELASTIC_APM_SECRET_TOKEN = "" + ELASTIC_APM_SEND_STRATEGY = "background" <1> + } + } +} +... +``` + +1. The [ELASTIC_APM_SEND_STRATEGY](apm-aws-lambda://reference/aws-lambda-config-options.md#_elastic_apm_send_strategy) defines when APM data is sent to your Elastic APM backend. To reduce the execution time of your lambda functions, we recommend to use the background strategy in production environments with steady load scenarios. + +:::::: + +::::::{tab-item} Container Image +Environment variables configured for an AWS Lambda function are passed to the container running the lambda function. You can use one of the other options (through AWS Web Console, AWS CLI, etc.) to configure the following environment variables: + +```bash +AWS_LAMBDA_EXEC_WRAPPER = /opt/python/bin/elasticapm-lambda <1> +ELASTIC_APM_LAMBDA_APM_SERVER = <2> +ELASTIC_APM_SECRET_TOKEN = <3> +ELASTIC_APM_SEND_STRATEGY = background <4> +``` + +1. Use this exact fixed value. +2. This is your APM Server URL. +3. This is your APM secret token. +4. The [ELASTIC_APM_SEND_STRATEGY](apm-aws-lambda://reference/aws-lambda-config-options.md#_elastic_apm_send_strategy) defines when APM data is sent to your Elastic APM backend. To reduce the execution time of your lambda functions, we recommend to use the background strategy in production environments with steady load scenarios. + +:::::: + +::::::: + +You can optionally [fine-tune the Python agent](/reference/configuration.md) or the [configuration of the {{apm-lambda-ext}}](apm-aws-lambda://reference/aws-lambda-config-options.md). + +That’s it. After following the steps above, you’re ready to go! Your Lambda function invocations should be traced from now on. Spans will be captured for [supported technologies](/reference/supported-technologies.md). 
You can also use [`capture_span`](/reference/api-reference.md#api-capture-span) to capture custom spans, and you can retrieve the `Client` object for capturing exceptions/messages using [`get_client`](/reference/api-reference.md#api-get-client). + diff --git a/docs/reference/logs.md b/docs/reference/logs.md new file mode 100644 index 000000000..22fbde3a3 --- /dev/null +++ b/docs/reference/logs.md @@ -0,0 +1,147 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/apm/agent/python/current/logs.html +applies_to: + stack: + serverless: + observability: + product: + apm_agent_python: ga +--- + +# Logs [logs] + +Elastic Python APM Agent provides the following log features: + +* [Log correlation](#log-correlation-ids) : Automatically inject correlation IDs that allow navigation between logs, traces and services. +* [Log reformatting (experimental)](#log-reformatting) : Automatically reformat plaintext logs in [ECS logging](ecs-logging://reference/intro.md) format. + +::::{note} +Elastic Python APM Agent does not send the logs to Elasticsearch. It only injects correlation IDs and reformats the logs. You must use another ingestion strategy. We recommend [Filebeat](https://www.elastic.co/beats/filebeat) for that purpose. +:::: + + +Those features are part of [Application log ingestion strategies](docs-content://solutions/observability/logs/stream-application-logs.md). + +The [`ecs-logging-python`](ecs-logging-python://reference/index.md) library can also be used to use the [ECS logging](ecs-logging://reference/intro.md) format without an APM agent. When deployed with the Python APM agent, the agent will provide [log correlation](#log-correlation-ids) IDs. 
+
+
+## Log correlation [log-correlation-ids]
+
+[Log correlation](docs-content://solutions/observability/logs/stream-application-logs.md) allows you to navigate to all logs belonging to a particular trace and vice versa: for a specific log, see in which context it has been logged and which parameters the user provided.
+
+The Agent provides integrations with both the default Python logging library and [`structlog`](http://www.structlog.org/en/stable/).
+
+* [Logging integrations](#logging-integrations)
+* [Log correlation in Elasticsearch](#log-correlation-in-es)
+
+
+### Logging integrations [logging-integrations]
+
+
+#### `logging` [logging]
+
+We use [`logging.setLogRecordFactory()`](https://docs.python.org/3/library/logging.html#logging.setLogRecordFactory) to decorate the default LogRecordFactory to automatically add new attributes to each LogRecord object:
+
+* `elasticapm_transaction_id`
+* `elasticapm_trace_id`
+* `elasticapm_span_id`
+
+This factory also adds these fields to a dictionary attribute, `elasticapm_labels`, using the official ECS [tracing fields](ecs://reference/ecs-tracing.md).
+
+You can disable this automatic behavior by using the [`disable_log_record_factory`](/reference/configuration.md#config-generic-disable-log-record-factory) setting in your configuration. 
+ + +#### `structlog` [structlog] + +We provide a [processor](http://www.structlog.org/en/stable/processors.html) for [`structlog`](http://www.structlog.org/en/stable/) which will add three new keys to the event_dict of any processed event: + +* `transaction.id` +* `trace.id` +* `span.id` + +```python +from structlog import PrintLogger, wrap_logger +from structlog.processors import JSONRenderer +from elasticapm.handlers.structlog import structlog_processor + +wrapped_logger = PrintLogger() +logger = wrap_logger(wrapped_logger, processors=[structlog_processor, JSONRenderer()]) +log = logger.new() +log.msg("some_event") +``` + + +#### Use structlog for agent-internal logging [_use_structlog_for_agent_internal_logging] + +The Elastic APM Python agent uses logging to log internal events and issues. By default, it will use a `logging` logger. If your project uses structlog, you can tell the agent to use a structlog logger by setting the environment variable `ELASTIC_APM_USE_STRUCTLOG` to `true`. + + +## Log correlation in Elasticsearch [log-correlation-in-es] + +In order to correlate logs from your app with transactions captured by the Elastic APM Python Agent, your logs must contain one or more of the following identifiers: + +* `transaction.id` +* `trace.id` +* `span.id` + +If you’re using structured logging, either [with a custom solution](https://docs.python.org/3/howto/logging-cookbook.html#implementing-structured-logging) or with [structlog](http://www.structlog.org/en/stable/) (recommended), then this is fairly easy. Throw the [JSONRenderer](http://www.structlog.org/en/stable/api.html#structlog.processors.JSONRenderer) in, and use [Filebeat](https://www.elastic.co/blog/structured-logging-filebeat) to pull these logs into Elasticsearch. + +Without structured logging the task gets a little trickier. 
Here we recommend first making sure your LogRecord objects have the elasticapm attributes (see [`logging`](#logging)), and then you’ll want to combine some specific formatting with a Grok pattern, either in Elasticsearch using [the grok processor](elasticsearch://reference/enrich-processor/grok-processor.md), or in [logstash with a plugin](logstash-docs-md://lsr/plugins-filters-grok.md). + +Say you have a [Formatter](https://docs.python.org/3/library/logging.html#logging.Formatter) that looks like this: + +```python +import logging + +fh = logging.FileHandler('spam.log') +formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") +fh.setFormatter(formatter) +``` + +You can add the APM identifiers by simply switching out the `Formatter` object for the one that we provide: + +```python +import logging +from elasticapm.handlers.logging import Formatter + +fh = logging.FileHandler('spam.log') +formatter = Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") +fh.setFormatter(formatter) +``` + +This will automatically append apm-specific fields to your format string: + +```python +formatstring = "%(asctime)s - %(name)s - %(levelname)s - %(message)s" +formatstring = formatstring + " | elasticapm " \ + "transaction.id=%(elasticapm_transaction_id)s " \ + "trace.id=%(elasticapm_trace_id)s " \ + "span.id=%(elasticapm_span_id)s" +``` + +Then, you could use a grok pattern like this (for the [Elasticsearch Grok Processor](elasticsearch://reference/enrich-processor/grok-processor.md)): + +```json +{ + "description" : "...", + "processors": [ + { + "grok": { + "field": "message", + "patterns": ["%{GREEDYDATA:msg} | elasticapm transaction.id=%{DATA:transaction.id} trace.id=%{DATA:trace.id} span.id=%{DATA:span.id}"] + } + } + ] +} +``` + + +## Log reformatting (experimental) [log-reformatting] + +Starting in version 6.16.0, the agent can automatically reformat application logs to ECS format with no changes to dependencies. 
Prior versions must install the `ecs_logging` dependency. + +Log reformatting is controlled by the [`log_ecs_reformatting`](/reference/configuration.md#config-log_ecs_reformatting) configuration option, and is disabled by default. + +The reformatted logs will include both the [trace and service correlation](#log-correlation-ids) IDs. + diff --git a/docs/reference/metrics.md b/docs/reference/metrics.md new file mode 100644 index 000000000..af4222c0b --- /dev/null +++ b/docs/reference/metrics.md @@ -0,0 +1,191 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/apm/agent/python/current/metrics.html +applies_to: + stack: + serverless: + observability: + product: + apm_agent_python: ga +--- + +# Metrics [metrics] + +With Elastic APM, you can capture system and process metrics. These metrics will be sent regularly to the APM Server and from there to Elasticsearch + + +## Metric sets [metric-sets] + +* [CPU/Memory metric set](#cpu-memory-metricset) +* [Breakdown metric set](#breakdown-metricset) +* [Prometheus metric set (beta)](#prometheus-metricset) +* [Custom Metrics](#custom-metrics) + + +### CPU/Memory metric set [cpu-memory-metricset] + +`elasticapm.metrics.sets.cpu.CPUMetricSet` + +This metric set collects various system metrics and metrics of the current process. + +::::{note} +if you do **not** use Linux, you need to install [`psutil`](https://pypi.org/project/psutil/) for this metric set. +:::: + + +**`system.cpu.total.norm.pct`** +: type: scaled_float + +format: percent + +The percentage of CPU time in states other than Idle and IOWait, normalized by the number of cores. + + +**`system.process.cpu.total.norm.pct`** +: type: scaled_float + +format: percent + +The percentage of CPU time spent by the process since the last event. This value is normalized by the number of CPU cores and it ranges from 0 to 100%. + + +**`system.memory.total`** +: type: long + +format: bytes + +Total memory. 
+ + +**`system.memory.actual.free`** +: type: long + +format: bytes + +Actual free memory in bytes. + + +**`system.process.memory.size`** +: type: long + +format: bytes + +The total virtual memory the process has. + + +**`system.process.memory.rss.bytes`** +: type: long + +format: bytes + +The Resident Set Size. The amount of memory the process occupied in main memory (RAM). + + + +#### Linux’s cgroup metrics [cpu-memory-cgroup-metricset] + +**`system.process.cgroup.memory.mem.limit.bytes`** +: type: long + +format: bytes + +Memory limit for current cgroup slice. + + +**`system.process.cgroup.memory.mem.usage.bytes`** +: type: long + +format: bytes + +Memory usage in current cgroup slice. + + + +### Breakdown metric set [breakdown-metricset] + +::::{note} +Tracking and collection of this metric set can be disabled using the [`breakdown_metrics`](/reference/configuration.md#config-breakdown_metrics) setting. +:::: + + +**`span.self_time`** +: type: simple timer + +This timer tracks the span self-times and is the basis of the transaction breakdown visualization. + +Fields: + +* `sum`: The sum of all span self-times in ms since the last report (the delta) +* `count`: The count of all span self-times since the last report (the delta) + +You can filter and group by these dimensions: + +* `transaction.name`: The name of the transaction +* `transaction.type`: The type of the transaction, for example `request` +* `span.type`: The type of the span, for example `app`, `template` or `db` +* `span.subtype`: The sub-type of the span, for example `mysql` (optional) + + + +### Prometheus metric set (beta) [prometheus-metricset] + +::::{warning} +This functionality is in beta and is subject to change. The design and code is less mature than official GA features and is being provided as-is with no warranties. Beta features are not subject to the support SLA of official GA features. 
+:::: + + +If you use [`prometheus_client`](https://github.com/prometheus/client_python) to collect metrics, the agent can collect them as well and make them available in Elasticsearch. + +The following types of metrics are supported: + +* Counters +* Gauges +* Summaries +* Histograms (requires APM Server / Elasticsearch / Kibana 7.14+) + +To use the Prometheus metric set, you have to enable it with the [`prometheus_metrics`](/reference/configuration.md#config-prometheus_metrics) configuration option. + +All metrics collected from `prometheus_client` are prefixed with `"prometheus.metrics."`. This can be changed using the [`prometheus_metrics_prefix`](/reference/configuration.md#config-prometheus_metrics_prefix) configuration option. + + +#### Beta limitations [prometheus-metricset-beta] + +* The metrics format may change without backwards compatibility in future releases. + + +## Custom Metrics [custom-metrics] + +Custom metrics allow you to send your own metrics to Elasticsearch. + +The most common way to send custom metrics is with the [Prometheus metric set](#prometheus-metricset). However, you can also use your own metric set. If you collect the metrics manually in your code, you can use the base `MetricSet` class: + +```python +from elasticapm.metrics.base_metrics import MetricSet + +client = elasticapm.Client() +metricset = client.metrics.register(MetricSet) + +for x in range(10): + metricset.counter("my_counter").inc() +``` + +Alternatively, you can create your own MetricSet class which inherits from the base class. In this case, you’ll usually want to override the `before_collect` method, where you can gather and set metrics before they are collected and sent to Elasticsearch. 
+ +You can add your `MetricSet` class as shown in the example above, or you can add an import string for your class to the [`metrics_sets`](/reference/configuration.md#config-metrics_sets) configuration option: + +```bash +ELASTIC_APM_METRICS_SETS="elasticapm.metrics.sets.cpu.CPUMetricSet,myapp.metrics.MyMetricSet" +``` + +Your MetricSet might look something like this: + +```python +from elasticapm.metrics.base_metrics import MetricSet + +class MyAwesomeMetricSet(MetricSet): + def before_collect(self): + self.gauge("my_gauge").set(myapp.some_value) +``` + +In the example above, the MetricSet would look up `myapp.some_value` and set the metric `my_gauge` to that value. This would happen whenever metrics are collected/sent, which is controlled by the [`metrics_interval`](/reference/configuration.md#config-metrics_interval) setting. + diff --git a/docs/reference/opentelemetry-api-bridge.md b/docs/reference/opentelemetry-api-bridge.md new file mode 100644 index 000000000..704d86ab5 --- /dev/null +++ b/docs/reference/opentelemetry-api-bridge.md @@ -0,0 +1,69 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/apm/agent/python/current/opentelemetry-bridge.html +applies_to: + stack: + serverless: + observability: + product: + apm_agent_python: ga +--- + +# OpenTelemetry API Bridge [opentelemetry-bridge] + +The Elastic APM OpenTelemetry bridge allows you to create Elastic APM `Transactions` and `Spans`, using the OpenTelemetry API. This allows users to utilize the Elastic APM agent’s automatic instrumentations, while keeping custom instrumentations vendor neutral. + +If a span is created while there is no transaction active, it will result in an Elastic APM [`Transaction`](docs-content://solutions/observability/apm/transactions.md). Inner spans are mapped to Elastic APM [`Span`](docs-content://solutions/observability/apm/spans.md). 
+ + +## Getting started [opentelemetry-getting-started] + +The first step in getting started with the OpenTelemetry bridge is to install the `opentelemetry` libraries: + +```bash +pip install elastic-apm[opentelemetry] +``` + +Or if you already have installed `elastic-apm`: + +```bash +pip install opentelemetry-api opentelemetry-sdk +``` + + +## Usage [opentelemetry-usage] + +```python +from elasticapm.contrib.opentelemetry import Tracer + +tracer = Tracer(__name__) +with tracer.start_as_current_span("test"): + # Do some work +``` + +or + +```python +from elasticapm.contrib.opentelemetry import trace + +tracer = trace.get_tracer(__name__) +with tracer.start_as_current_span("test"): + # Do some work +``` + +`Tracer` and `get_tracer()` accept the following optional arguments: + +* `elasticapm_client`: an already instantiated Elastic APM client +* `config`: a configuration dictionary, which will be used to instantiate a new Elastic APM client, e.g. `{"SERVER_URL": "https://example.org"}`. See [configuration](/reference/configuration.md) for more information. + +The `Tracer` object mirrors the upstream interface on the [OpenTelemetry `Tracer` object.](https://opentelemetry-python.readthedocs.io/en/latest/api/trace.html#opentelemetry.trace.Tracer) + + +## Caveats [opentelemetry-caveats] + +Not all features of the OpenTelemetry API are supported. + +Processors, exporters, metrics, logs, span events, and span links are not supported. + +Additionally, due to implementation details, the global context API only works when a span is included in the activated context, and tokens are not used. Instead, the global context works as a stack, and when a context is detached the previously-active context will automatically be activated. 
+ diff --git a/docs/reference/performance-tuning.md b/docs/reference/performance-tuning.md new file mode 100644 index 000000000..6ff228baf --- /dev/null +++ b/docs/reference/performance-tuning.md @@ -0,0 +1,92 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/apm/agent/python/current/tuning-and-overhead.html +applies_to: + stack: + serverless: + observability: + product: + apm_agent_python: ga +--- + +# Performance tuning [tuning-and-overhead] + +Using an APM solution comes with certain trade-offs, and the Python agent for Elastic APM is no different. Instrumenting your code, measuring timings, recording context data, etc., all need resources: + +* CPU time +* memory +* bandwidth use +* Elasticsearch storage + +We invested and continue to invest a lot of effort to keep the overhead of using Elastic APM as low as possible. But because every deployment is different, there are some knobs you can turn to adapt it to your specific needs. + + +## Transaction Sample Rate [tuning-sample-rate] + +The easiest way to reduce the overhead of the agent is to tell the agent to do less. If you set the [`transaction_sample_rate`](/reference/configuration.md#config-transaction-sample-rate) to a value below `1.0`, the agent will randomly sample only a subset of transactions. Unsampled transactions only record the name of the transaction, the overall transaction time, and the result: + +| Field | Sampled | Unsampled | +| --- | --- | --- | +| Transaction name | yes | yes | +| Duration | yes | yes | +| Result | yes | yes | +| Context | yes | no | +| Tags | yes | no | +| Spans | yes | no | + +Reducing the sample rate to a fraction of all transactions can make a huge difference in all four of the mentioned resource types. + + +## Transaction Queue [tuning-queue] + +To reduce the load on the APM Server, the agent does not send every transaction up as it happens. 
Instead, it queues them up and flushes the queue periodically, or when it reaches a maximum size, using a background thread. + +While this reduces the load on the APM Server (and to a certain extent on the agent), holding on to the transaction data in a queue uses memory. If you notice that using the Python agent results in a large increase of memory use, you can use these settings: + +* [`api_request_time`](/reference/configuration.md#config-api-request-time) to reduce the time between queue flushes +* [`api_request_size`](/reference/configuration.md#config-api-request-size) to reduce the maximum size of the queue + +The first setting, `api_request_time`, is helpful if you have a sustained high number of transactions. The second setting, `api_request_size`, can help if you experience peaks of transactions (a large number of transactions in a short period of time). + +Keep in mind that reducing the value of either setting will cause the agent to send more HTTP requests to the APM Server, potentially causing a higher load. + + +## Spans per transaction [tuning-max-spans] + +The average amount of spans per transaction can influence how much time the agent spends in each transaction collecting contextual data for each span, and the storage space needed in Elasticsearch. In our experience, most *usual* transactions should have well below 100 spans. In some cases, however, the number of spans can explode: + +* long-running transactions +* unoptimized code, e.g. doing hundreds of SQL queries in a loop + +To avoid these edge cases overloading both the agent and the APM Server, the agent stops recording spans when a specified limit is reached. You can configure this limit by changing the [`transaction_max_spans`](/reference/configuration.md#config-transaction-max-spans) setting. + + +## Span Stack Trace Collection [tuning-span-stack-trace-collection] + +Collecting stack traces for spans can be fairly costly from a performance standpoint. 
Stack traces are very useful for pinpointing which part of your code is generating a span; however, these stack traces are less useful for very short spans (as problematic spans tend to be longer). + +You can define a minimal threshold for span duration using the [`span_stack_trace_min_duration`](/reference/configuration.md#config-span-stack-trace-min-duration) setting. If a span’s duration is less than this config value, no stack frames will be collected for this span. + + +## Collecting Frame Context [tuning-frame-context] + +When a stack trace is captured, the agent will also capture several lines of source code around each frame location in the stack trace. This allows the APM app to give greater insight into where exactly the error or span happens. + +There are four settings you can modify to control this behavior: + +* [`source_lines_error_app_frames`](/reference/configuration.md#config-source-lines-error-app-frames) +* [`source_lines_error_library_frames`](/reference/configuration.md#config-source-lines-error-library-frames) +* [`source_lines_span_app_frames`](/reference/configuration.md#config-source-lines-span-app-frames) +* [`source_lines_span_library_frames`](/reference/configuration.md#config-source-lines-span-library-frames) + +As you can see, these settings are divided between app frames, which represent your application code, and library frames, which represent the code of your dependencies. Each of these categories are also split into separate error and span settings. + +Reading source files inside a running application can cause a lot of disk I/O, and sending up source lines for each frame will have a network and storage cost that is quite high. Turning down these limits will help prevent excessive memory usage. 
+ + +## Collecting headers and request body [tuning-body-headers] + +You can configure the Elastic APM agent to capture headers of both requests and responses ([`capture_headers`](/reference/configuration.md#config-capture-headers)), as well as request bodies ([`capture_body`](/reference/configuration.md#config-capture-body)). By default, capturing request bodies is disabled. Enabling it for transactions may introduce noticeable overhead, as well as increased storage use, depending on the nature of your POST requests. In most scenarios, we advise against enabling request body capturing for transactions, and only enable it if necessary for errors. + +Capturing request/response headers has less overhead on the agent, but can have an impact on storage use. If storage use is a problem for you, it might be worth disabling. + diff --git a/docs/reference/run-tests-locally.md b/docs/reference/run-tests-locally.md new file mode 100644 index 000000000..758209217 --- /dev/null +++ b/docs/reference/run-tests-locally.md @@ -0,0 +1,78 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/apm/agent/python/current/run-tests-locally.html +applies_to: + stack: + serverless: + observability: + product: + apm_agent_python: ga +--- + +# Run Tests Locally [run-tests-locally] + +To run tests locally you can make use of the docker images also used when running the whole test suite with Jenkins. Running the full test suite first does some linting and then runs the actual tests with different versions of Python and different web frameworks. For a full overview of the test matrix and supported versions have a look at [Jenkins Configuration](https://github.com/elastic/apm-agent-python/blob/main/Jenkinsfile). + + +### Pre Commit [pre-commit] + +We run our git hooks on every commit to automatically point out issues in code. Those issues are also detected within the GitHub actions. 
Please follow the installation steps stated in [https://pre-commit.com/#install](https://pre-commit.com/#install). + + +### Code Linter [coder-linter] + +We run two code linters `isort` and `flake8`. You can trigger each single one locally by running: + +```bash +$ pre-commit run -a isort +``` + +```bash +$ pre-commit run -a flake8 +``` + + +### Code Formatter [coder-formatter] + +We test that the code is formatted using `black`. You can trigger this check by running: + +```bash +$ pre-commit run -a black +``` + + +### Test Documentation [test-documentation] + +We test that the documentation can be generated without errors. You can trigger this check by running: + +```bash +$ ./tests/scripts/docker/docs.sh +``` + + +### Running Tests [running-tests] + +We run the test suite on different combinations of Python versions and web frameworks. For triggering the test suite for a specific combination locally you can run: + +```bash +$ ./tests/scripts/docker/run_tests.sh python-version framework-version +``` + +::::{note} +The `python-version` must be of format `python-version`, e.g. `python-3.6` or `pypy-2`. The `framework` must be of format `framework-version`, e.g. `django-1.10` or `flask-0.12`. +:::: + + +You can also run the unit tests outside of docker, by installing the relevant [requirements file](https://github.com/elastic/apm-agent-python/tree/main/tests/requirements) and then running `py.test` from the project root. + +## Integration testing [_integration_testing] + +Check out [https://github.com/elastic/apm-integration-testing](https://github.com/elastic/apm-integration-testing) for resources for setting up full end-to-end testing environments. 
For example, to spin up an environment with the [opbeans Django app](https://github.com/basepi/opbeans-python), with version 7.3 of the elastic stack and the apm-python-agent from your local checkout, you might do something like this: + +```bash +$ ./scripts/compose.py start 7.3 \ + --with-agent-python-django --with-opbeans-python \ + --opbeans-python-agent-local-repo=~/elastic/apm-agent-python +``` + + diff --git a/docs/reference/sanic-support.md b/docs/reference/sanic-support.md new file mode 100644 index 000000000..953527a22 --- /dev/null +++ b/docs/reference/sanic-support.md @@ -0,0 +1,146 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/apm/agent/python/current/sanic-support.html +applies_to: + stack: + serverless: + observability: + product: + apm_agent_python: ga +--- + +# Sanic Support [sanic-support] + +Incorporating Elastic APM into your Sanic project only requires a few easy steps. + + +## Installation [sanic-installation] + +Install the Elastic APM agent using pip: + +```bash +$ pip install elastic-apm +``` + +or add `elastic-apm` to your project’s `requirements.txt` file. + + +## Setup [sanic-setup] + +To set up the agent, you need to initialize it with appropriate settings. + +The settings are configured either via environment variables, or as initialization arguments. + +You can find a list of all available settings in the [Configuration](/reference/configuration.md) page. 
+ +To initialize the agent for your application using environment variables: + +```python +from sanic import Sanic +from elasticapm.contrib.sanic import ElasticAPM + +app = Sanic(name="elastic-apm-sample") +apm = ElasticAPM(app=app) +``` + +To configure the agent using initialization arguments and Sanic’s Configuration infrastructure: + +```python +# Create a file named external_config.py in your application +# If you want this module based configuration to be used for APM, prefix them with ELASTIC_APM_ +ELASTIC_APM_SERVER_URL = "https://serverurl.example.com:443" +ELASTIC_APM_SECRET_TOKEN = "sometoken" +``` + +```python +from sanic import Sanic +from elasticapm.contrib.sanic import ElasticAPM + +app = Sanic(name="elastic-apm-sample") +app.config.update_config("path/to/external_config.py") +apm = ElasticAPM(app=app) +``` + + +## Usage [sanic-usage] + +Once you have configured the agent, it will automatically track transactions and capture uncaught exceptions within sanic. + +Capture an arbitrary exception by calling [`capture_exception`](/reference/api-reference.md#client-api-capture-exception): + +```python +from sanic import Sanic +from elasticapm.contrib.sanic import ElasticAPM + +app = Sanic(name="elastic-apm-sample") +apm = ElasticAPM(app=app) + +try: + 1 / 0 +except ZeroDivisionError: + apm.capture_exception() +``` + +Log a generic message with [`capture_message`](/reference/api-reference.md#client-api-capture-message): + +```python +from sanic import Sanic +from elasticapm.contrib.sanic import ElasticAPM + +app = Sanic(name="elastic-apm-sample") +apm = ElasticAPM(app=app) + +apm.capture_message('hello, world!') +``` + + +## Performance metrics [sanic-performance-metrics] + +If you’ve followed the instructions above, the agent has installed our instrumentation middleware which will process all requests through your app. This will measure response times, as well as detailed performance data for all supported technologies. 
+ +::::{note} +Due to the fact that `asyncio` drivers are usually separate from their synchronous counterparts, specific instrumentation is needed for all drivers. The support for asynchronous drivers is currently quite limited. +:::: + + + +### Ignoring specific routes [sanic-ignoring-specific-views] + +You can use the [`TRANSACTIONS_IGNORE_PATTERNS`](/reference/configuration.md#config-transactions-ignore-patterns) configuration option to ignore specific routes. The list given should be a list of regular expressions which are matched against the transaction name: + +```python +from sanic import Sanic +from elasticapm.contrib.sanic import ElasticAPM + +app = Sanic(name="elastic-apm-sample") +apm = ElasticAPM(app=app, config={ + 'TRANSACTIONS_IGNORE_PATTERNS': ['^GET /secret', '/extra_secret'], +}) +``` + +This would ignore any requests using the `GET /secret` route and any requests containing `/extra_secret`. + + +## Extended Sanic APM Client Usage [extended-sanic-usage] + +Sanic’s contributed APM client also provides a few extendable ways to configure selective behaviors to enhance the information collected as part of the transactions being tracked by the APM. + +In order to enable this behavior, the APM Client middleware provides a few callback functions that you can leverage in order to simplify the process of generating additional contexts into the traces being collected. 
+ +| Callback Name | Callback Invocation Format | Expected Return Format | Is Async | +| --- | --- | --- | --- | +| transaction_name_callback | transaction_name_callback(request) | string | false | +| user_context_callback | user_context_callback(request) | (username_string, user_email_string, userid_string) | true | +| custom_context_callback | custom_context_callback(request) or custom_context_callback(response) | dict(str=str) | true | +| label_info_callback | label_info_callback() | dict(str=str) | true | + + +## Supported Sanic and Python versions [supported-stanic-and-python-versions] + +A list of supported [Sanic](/reference/supported-technologies.md#supported-sanic) and [Python](/reference/supported-technologies.md#supported-python) versions can be found on our [Supported Technologies](/reference/supported-technologies.md) page. + +::::{note} +Elastic APM only supports `asyncio` when using Python 3.7+ +:::: + + diff --git a/docs/sanitizing-data.asciidoc b/docs/reference/sanitizing-data.md similarity index 68% rename from docs/sanitizing-data.asciidoc rename to docs/reference/sanitizing-data.md index 4daa9eb8f..51694b88d 100644 --- a/docs/sanitizing-data.asciidoc +++ b/docs/reference/sanitizing-data.md @@ -1,22 +1,27 @@ -[[sanitizing-data]] -=== Sanitizing data +--- +mapped_pages: + - https://www.elastic.co/guide/en/apm/agent/python/current/sanitizing-data.html +applies_to: + stack: + serverless: + observability: + product: + apm_agent_python: ga +--- -Sometimes it is necessary to sanitize the data sent to Elastic APM, -e.g. remove sensitive data. +# Sanitizing data [sanitizing-data] -To do this with the Elastic APM module, you create a processor. -A processor is a function that takes a `client` instance as well as an event (an error, a transaction, a span, or a metricset), -and returns the modified event. +Sometimes it is necessary to sanitize the data sent to Elastic APM, e.g. remove sensitive data. 
+ +To do this with the Elastic APM module, you create a processor. A processor is a function that takes a `client` instance as well as an event (an error, a transaction, a span, or a metricset), and returns the modified event. To completely drop an event, your processor should return `False` (or any other "falsy" value) instead of the event. -An event will also be dropped if any processor raises an exception while processing it. -A log message with level `WARNING` will be issued in this case. +An event will also be dropped if any processor raises an exception while processing it. A log message with level `WARNING` will be issued in this case. This is an example of a processor that removes the exception stacktrace from an error: -[source,python] ----- +```python from elasticapm.conf.constants import ERROR from elasticapm.processors import for_events @@ -25,16 +30,13 @@ def my_processor(client, event): if 'exception' in event and 'stacktrace' in event['exception']: event['exception'].pop('stacktrace') return event ----- +``` -You can use the `@for_events` decorator to limit for which event type the processor should be called. -Possible choices are `ERROR`, `TRANSACTION`, `SPAN` and `METRICSET`, -all of which are defined in `elasticapm.conf.constants`. +You can use the `@for_events` decorator to limit for which event type the processor should be called. Possible choices are `ERROR`, `TRANSACTION`, `SPAN` and `METRICSET`, all of which are defined in `elasticapm.conf.constants`. To use this processor, update your `ELASTIC_APM` settings like this: -[source,python] ----- +```python ELASTIC_APM = { 'SERVICE_NAME': '', 'SECRET_TOKEN': '', @@ -47,14 +49,16 @@ ELASTIC_APM = { 'elasticapm.processors.sanitize_http_request_body', ), } ----- +``` + +::::{note} +We recommend using the above list of processors that sanitize passwords and secrets in different places of the event object. 
+:::: -NOTE: We recommend using the above list of processors that sanitize passwords and secrets in different places of the event object. -The default set of processors sanitize fields based on a set of defaults defined in `elasticapm.conf.constants`. This set can be configured with the `SANITIZE_FIELD_NAMES` configuration option. For example, if your application produces a sensitive field called `My-Sensitive-Field`, the default processors can be used to automatically sanitize this field. You can specify what fields to santize within default processors like this: +The default set of processors sanitize fields based on a set of defaults defined in `elasticapm.conf.constants`. This set can be configured with the `SANITIZE_FIELD_NAMES` configuration option. For example, if your application produces a sensitive field called `My-Sensitive-Field`, the default processors can be used to automatically sanitize this field. You can specify what fields to sanitize within default processors like this: -[source,python] ----- +```python ELASTIC_APM = { 'SERVICE_NAME': '', 'SECRET_TOKEN': '', @@ -72,8 +76,12 @@ ELASTIC_APM = { "set-cookie", ), } ----- +``` + +::::{note} +We recommend to use the above list of fields to sanitize various parts of the event object in addition to your specified fields. +:::: -NOTE: We recommend to use the above list of fields to sanitize various parts of the event object in addition to your specified fields. When choosing fields names to sanitize, you can specify values that will match certain wildcards. For example, passing `base` as a field name to be sanitized will also sanitize all fields whose names match the regex pattern `\*base*`. 
+ diff --git a/docs/reference/set-up-apm-python-agent.md b/docs/reference/set-up-apm-python-agent.md new file mode 100644 index 000000000..8ed82bc59 --- /dev/null +++ b/docs/reference/set-up-apm-python-agent.md @@ -0,0 +1,38 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/apm/agent/python/current/set-up.html +applies_to: + stack: + serverless: + observability: + product: + apm_agent_python: ga +--- + +# Set up the APM Python Agent [set-up] + +To get you off the ground, we’ve prepared guides for setting up the Agent with different frameworks: + +* [Django](/reference/django-support.md) +* [Flask](/reference/flask-support.md) +* [aiohttp](/reference/aiohttp-server-support.md) +* [Tornado](/reference/tornado-support.md) +* [Starlette/FastAPI](/reference/starlette-support.md) +* [Sanic](/reference/sanic-support.md) +* [AWS Lambda](/reference/lambda-support.md) +* [Azure Functions](/reference/azure-functions-support.md) +* [Wrapper (Experimental)](/reference/wrapper-support.md) +* [ASGI Middleware](/reference/asgi-middleware.md) + +For custom instrumentation, see [Instrumenting Custom Code](/reference/instrumenting-custom-code.md). + + + + + + + + + + + diff --git a/docs/reference/starlette-support.md b/docs/reference/starlette-support.md new file mode 100644 index 000000000..fcffa08f3 --- /dev/null +++ b/docs/reference/starlette-support.md @@ -0,0 +1,133 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/apm/agent/python/current/starlette-support.html +applies_to: + stack: + serverless: + observability: + product: + apm_agent_python: ga +--- + +# Starlette/FastAPI Support [starlette-support] + +Incorporating Elastic APM into your Starlette project only requires a few easy steps. + + +## Installation [starlette-installation] + +Install the Elastic APM agent using pip: + +```bash +$ pip install elastic-apm +``` + +or add `elastic-apm` to your project’s `requirements.txt` file. 
+ + +## Setup [starlette-setup] + +To set up the agent, you need to initialize it with appropriate settings. + +The settings are configured either via environment variables, or as initialization arguments. + +You can find a list of all available settings in the [Configuration](/reference/configuration.md) page. + +To initialize the agent for your application using environment variables, add the ElasticAPM middleware to your Starlette application: + +```python +from starlette.applications import Starlette +from elasticapm.contrib.starlette import ElasticAPM + +app = Starlette() +app.add_middleware(ElasticAPM) +``` + +::::{warning} +`BaseHTTPMiddleware` breaks `contextvar` propagation, as noted [here](https://www.starlette.io/middleware/#limitations). This means the ElasticAPM middleware must be above any `BaseHTTPMiddleware` in the final middleware list. If you’re calling `add_middleware` repeatedly, add the ElasticAPM middleware last. If you’re passing in a list of middleware, ElasticAPM should be first on that list. +:::: + + +To configure the agent using initialization arguments: + +```python +from starlette.applications import Starlette +from elasticapm.contrib.starlette import make_apm_client, ElasticAPM + +apm = make_apm_client({ + 'SERVICE_NAME': '', + 'SECRET_TOKEN': '', + 'SERVER_URL': '', +}) +app = Starlette() +app.add_middleware(ElasticAPM, client=apm) +``` + + +## FastAPI [starlette-fastapi] + +Because FastAPI supports Starlette middleware, using the agent with FastAPI is almost exactly the same as with Starlette: + +```python +from fastapi import FastAPI +from elasticapm.contrib.starlette import ElasticAPM + +app = FastAPI() +app.add_middleware(ElasticAPM) +``` + + +## Usage [starlette-usage] + +Once you have configured the agent, it will automatically track transactions and capture uncaught exceptions within starlette. 
+ +Capture an arbitrary exception by calling [`capture_exception`](/reference/api-reference.md#client-api-capture-exception): + +```python +try: + 1 / 0 +except ZeroDivisionError: + apm.capture_exception() +``` + +Log a generic message with [`capture_message`](/reference/api-reference.md#client-api-capture-message): + +```python +apm.capture_message('hello, world!') +``` + + +## Performance metrics [starlette-performance-metrics] + +If you’ve followed the instructions above, the agent has installed our instrumentation middleware which will process all requests through your app. This will measure response times, as well as detailed performance data for all supported technologies. + +::::{note} +Due to the fact that `asyncio` drivers are usually separate from their synchronous counterparts, specific instrumentation is needed for all drivers. The support for asynchronous drivers is currently quite limited. +:::: + + + +### Ignoring specific routes [starlette-ignoring-specific-views] + +You can use the [`TRANSACTIONS_IGNORE_PATTERNS`](/reference/configuration.md#config-transactions-ignore-patterns) configuration option to ignore specific routes. The list given should be a list of regular expressions which are matched against the transaction name: + +```python +apm = make_apm_client({ + # ... + 'TRANSACTIONS_IGNORE_PATTERNS': ['^GET /secret', '/extra_secret'] + # ... +}) +``` + +This would ignore any requests using the `GET /secret` route and any requests containing `/extra_secret`. + + +## Supported Starlette and Python versions [supported-starlette-and-python-versions] + +A list of supported [Starlette](/reference/supported-technologies.md#supported-starlette) and [Python](/reference/supported-technologies.md#supported-python) versions can be found on our [Supported Technologies](/reference/supported-technologies.md) page. 
+ +::::{note} +Elastic APM only supports `asyncio` when using Python 3.7+ +:::: + + diff --git a/docs/reference/supported-technologies.md b/docs/reference/supported-technologies.md new file mode 100644 index 000000000..47ee5c12b --- /dev/null +++ b/docs/reference/supported-technologies.md @@ -0,0 +1,657 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/apm/agent/python/current/supported-technologies.html +applies_to: + stack: + serverless: + observability: + product: + apm_agent_python: ga +--- + +# Supported technologies [supported-technologies] + +$$$framework-support$$$ +The Elastic APM Python Agent comes with support for the following frameworks: + +* [Django](/reference/django-support.md) +* [Flask](/reference/flask-support.md) +* [Aiohttp Server](#supported-aiohttp) +* [Tornado](#supported-tornado) +* [Starlette/FastAPI](#supported-starlette) +* [Sanic](#supported-sanic) +* [GRPC](#supported-grpc) + +For other frameworks and custom Python code, the agent exposes a set of [APIs](/reference/api-reference.md) for integration. + + +### Python [supported-python] + +The following Python versions are supported: + +* 3.6 +* 3.7 +* 3.8 +* 3.9 +* 3.10 +* 3.11 +* 3.12 +* 3.13 + + +### Django [supported-django] + +We support these Django versions: + +* 1.11 +* 2.0 +* 2.1 +* 2.2 +* 3.0 +* 3.1 +* 3.2 +* 4.0 +* 4.2 +* 5.0 + +For upcoming Django versions, we generally aim to ensure compatibility starting with the first Release Candidate. + +::::{note} +we currently don’t support Django running in ASGI mode. 
+:::: + + + +### Flask [supported-flask] + +We support these Flask versions: + +* 0.10 (Deprecated) +* 0.11 (Deprecated) +* 0.12 (Deprecated) +* 1.0 +* 1.1 +* 2.0 +* 2.1 +* 2.2 +* 2.3 +* 3.0 + + +### Aiohttp Server [supported-aiohttp] + +We support these aiohttp versions: + +* 3.x + + +### Tornado [supported-tornado] + +We support these tornado versions: + +* 6.x + + +### Sanic [supported-sanic] + +We support these sanic versions: + +* >20.12.2,<26 + + +### Starlette/FastAPI [supported-starlette] + +We support these Starlette versions: + +* >0.13.0,<1 + +Any FastAPI version which uses a supported Starlette version should also be supported. + + +### GRPC [supported-grpc] + +We support these `grpcio` versions: + +* >1.24.0,<2 + + +## Automatic Instrumentation [automatic-instrumentation] + +The Python APM agent comes with automatic instrumentation of various 3rd party modules and standard library modules. + + +### Scheduling [automatic-instrumentation-scheduling] + + +##### Celery [automatic-instrumentation-scheduling-celery] + +We support these Celery versions: + +* 4.x (deprecated) +* 5.x + +Celery tasks will be recorded automatically with Django and Flask only. 
+ + +### Databases [automatic-instrumentation-db] + + +#### Elasticsearch [automatic-instrumentation-db-elasticsearch] + +Instrumented methods: + +* `elasticsearch.transport.Transport.perform_request` +* `elasticsearch.connection.http_urllib3.Urllib3HttpConnection.perform_request` +* `elasticsearch.connection.http_requests.RequestsHttpConnection.perform_request` +* `elasticsearch._async.transport.AsyncTransport.perform_request` +* `elasticsearch_async.connection.AIOHttpConnection.perform_request` + +Additionally, the instrumentation wraps the following methods of the `Elasticsearch` client class: + +* `elasticsearch.client.Elasticsearch.delete_by_query` +* `elasticsearch.client.Elasticsearch.search` +* `elasticsearch.client.Elasticsearch.count` +* `elasticsearch.client.Elasticsearch.update` + +Collected trace data: + +* the query string (if available) +* the `query` element from the request body (if available) +* the response status code +* the count of affected rows (if available) + +We recommend using keyword arguments only with elasticsearch-py, as recommended by [the elasticsearch-py docs](https://elasticsearch-py.readthedocs.io/en/latest/api.html#api-documentation). If you are using positional arguments, we will be unable to gather the `query` element from the request body. + + +#### SQLite [automatic-instrumentation-db-sqlite] + +Instrumented methods: + +* `sqlite3.connect` +* `sqlite3.dbapi2.connect` +* `pysqlite2.dbapi2.connect` + +The instrumented `connect` method returns a wrapped connection/cursor which instruments the actual `Cursor.execute` calls. + +Collected trace data: + +* parametrized SQL query + + +#### MySQLdb [automatic-instrumentation-db-mysql] + +Library: `MySQLdb` (`<2`) + +Instrumented methods: + +* `MySQLdb.connect` + +The instrumented `connect` method returns a wrapped connection/cursor which instruments the actual `Cursor.execute` calls. 
+ +Collected trace data: + +* parametrized SQL query + + +#### mysql-connector [automatic-instrumentation-db-mysql-connector] + +Library: `mysql-connector-python` (`<9`) + +Instrumented methods: + +* `mysql.connector.connect` + +The instrumented `connect` method returns a wrapped connection/cursor which instruments the actual `Cursor.execute` calls. + +Collected trace data: + +* parametrized SQL query + + +#### pymysql [automatic-instrumentation-db-pymysql] + +Library: `pymysql` (`<2`) + +Instrumented methods: + +* `pymysql.connect` + +The instrumented `connect` method returns a wrapped connection/cursor which instruments the actual `Cursor.execute` calls. + +Collected trace data: + +* parametrized SQL query + + +#### aiomysql [automatic-instrumentation-db-aiomysql] + +Library: `aiomysql` (`<1`) + +Instrumented methods: + +* `aiomysql.cursors.Cursor.execute` + +Collected trace data: + +* parametrized SQL query + + +#### PostgreSQL Psycopg2 [automatic-instrumentation-db-postgres] + +Library: `psycopg2`, `psycopg2-binary` (`>=2.9,<3`) + +Instrumented methods: + +* `psycopg2.connect` + +The instrumented `connect` method returns a wrapped connection/cursor which instruments the actual `Cursor.execute` calls. + +Collected trace data: + +* parametrized SQL query + +#### PostgreSQL Psycopg [automatic-instrumentation-db-postgres-psycopg] + +Library: `psycopg`, `psycopg-binary` (`>3.0.0,<4`) + +Instrumented methods: + +* `psycopg.connect` + +The instrumented `connect` method returns a wrapped connection/cursor which instruments the actual `Cursor.execute` calls. 
+ +Collected trace data: + +* parametrized SQL query + +#### aiopg [automatic-instrumentation-db-aiopg] + +Library: `aiopg` (`>=1.0,<2`) + +Instrumented methods: + +* `aiopg.cursor.Cursor.execute` +* `aiopg.cursor.Cursor.callproc` + +Collected trace data: + +* parametrized SQL query + + +#### asyncpg [automatic-instrumentation-db-asyncg] + +Library: `asyncpg` (`>=0.20,<2`) + +Instrumented methods: + +* `asyncpg.connection.Connection.execute` +* `asyncpg.connection.Connection.executemany` + +Collected trace data: + +* parametrized SQL query + + +#### PyODBC [automatic-instrumentation-db-pyodbc] + +Library: `pyodbc` (`>=4.0,<6`) + +Instrumented methods: + +* `pyodbc.connect` + +The instrumented `connect` method returns a wrapped connection/cursor which instruments the actual `Cursor.execute` calls. + +Collected trace data: + +* parametrized SQL query + + +#### MS-SQL [automatic-instrumentation-db-mssql] + +Library: `pymssql` (`>=2.1.0,<3`) + +Instrumented methods: + +* `pymssql.connect` + +The instrumented `connect` method returns a wrapped connection/cursor which instruments the actual `Cursor.execute` calls. 
+ +Collected trace data: + +* parametrized SQL query + + +#### MongoDB [automatic-instrumentation-db-mongodb] + +Library: `pymongo` (`>=2.9,<5`) + +Instrumented methods: + +* `pymongo.collection.Collection.aggregate` +* `pymongo.collection.Collection.bulk_write` +* `pymongo.collection.Collection.count` +* `pymongo.collection.Collection.create_index` +* `pymongo.collection.Collection.create_indexes` +* `pymongo.collection.Collection.delete_many` +* `pymongo.collection.Collection.delete_one` +* `pymongo.collection.Collection.distinct` +* `pymongo.collection.Collection.drop` +* `pymongo.collection.Collection.drop_index` +* `pymongo.collection.Collection.drop_indexes` +* `pymongo.collection.Collection.ensure_index` +* `pymongo.collection.Collection.find_and_modify` +* `pymongo.collection.Collection.find_one` +* `pymongo.collection.Collection.find_one_and_delete` +* `pymongo.collection.Collection.find_one_and_replace` +* `pymongo.collection.Collection.find_one_and_update` +* `pymongo.collection.Collection.group` +* `pymongo.collection.Collection.inline_map_reduce` +* `pymongo.collection.Collection.insert` +* `pymongo.collection.Collection.insert_many` +* `pymongo.collection.Collection.insert_one` +* `pymongo.collection.Collection.map_reduce` +* `pymongo.collection.Collection.reindex` +* `pymongo.collection.Collection.remove` +* `pymongo.collection.Collection.rename` +* `pymongo.collection.Collection.replace_one` +* `pymongo.collection.Collection.save` +* `pymongo.collection.Collection.update` +* `pymongo.collection.Collection.update_many` +* `pymongo.collection.Collection.update_one` + +Collected trace data: + +* database name +* method name + + +#### Redis [automatic-instrumentation-db-redis] + +Library: `redis` (`>=2.8,<8`) + +Instrumented methods: + +* `redis.client.Redis.execute_command` +* `redis.client.Pipeline.execute` + +Collected trace data: + +* Redis command name + + +#### aioredis [automatic-instrumentation-db-aioredis] + +Library: `aioredis` (`<=2.0.1`) + 
+Instrumented methods: + +* `aioredis.pool.ConnectionsPool.execute` +* `aioredis.commands.transaction.Pipeline.execute` +* `aioredis.connection.RedisConnection.execute` + +Collected trace data: + +* Redis command name + + +#### Cassandra [automatic-instrumentation-db-cassandra] + +Library: `cassandra-driver` (`>=3.4,<4.0`) + +Instrumented methods: + +* `cassandra.cluster.Session.execute` +* `cassandra.cluster.Cluster.connect` + +Collected trace data: + +* CQL query + + +#### Python Memcache [automatic-instrumentation-db-python-memcache] + +Library: `python-memcached` (`>=1.51,<2`) + +Instrumented methods: + +* `memcache.Client.add` +* `memcache.Client.append` +* `memcache.Client.cas` +* `memcache.Client.decr` +* `memcache.Client.delete` +* `memcache.Client.delete_multi` +* `memcache.Client.disconnect_all` +* `memcache.Client.flush_all` +* `memcache.Client.get` +* `memcache.Client.get_multi` +* `memcache.Client.get_slabs` +* `memcache.Client.get_stats` +* `memcache.Client.gets` +* `memcache.Client.incr` +* `memcache.Client.prepend` +* `memcache.Client.replace` +* `memcache.Client.set` +* `memcache.Client.set_multi` +* `memcache.Client.touch` + +Collected trace data: + +* Destination (address and port) + + +#### pymemcache [automatic-instrumentation-db-pymemcache] + +Library: `pymemcache` (`>=3.0,<4.1`) + +Instrumented methods: + +* `pymemcache.client.base.Client.add` +* `pymemcache.client.base.Client.append` +* `pymemcache.client.base.Client.cas` +* `pymemcache.client.base.Client.decr` +* `pymemcache.client.base.Client.delete` +* `pymemcache.client.base.Client.delete_many` +* `pymemcache.client.base.Client.delete_multi` +* `pymemcache.client.base.Client.flush_all` +* `pymemcache.client.base.Client.get` +* `pymemcache.client.base.Client.get_many` +* `pymemcache.client.base.Client.get_multi` +* `pymemcache.client.base.Client.gets` +* `pymemcache.client.base.Client.gets_many` +* `pymemcache.client.base.Client.incr` +* `pymemcache.client.base.Client.prepend` +* 
`pymemcache.client.base.Client.quit` +* `pymemcache.client.base.Client.replace` +* `pymemcache.client.base.Client.set` +* `pymemcache.client.base.Client.set_many` +* `pymemcache.client.base.Client.set_multi` +* `pymemcache.client.base.Client.stats` +* `pymemcache.client.base.Client.touch` + +Collected trace data: + +* Destination (address and port) + + +#### kafka-python [automatic-instrumentation-db-kafka-python] + +Library: `kafka-python` (`>=2.0,<3`) + +Instrumented methods: + +* `kafka.KafkaProducer.send`, +* `kafka.KafkaConsumer.poll`, +* `kafka.KafkaConsumer.__next__` + +Collected trace data: + +* Destination (address and port) +* topic (if applicable) + + +### External HTTP requests [automatic-instrumentation-http] + + +#### Standard library [automatic-instrumentation-stdlib-urllib] + +Library: `urllib.request` (Python 3) + +Instrumented methods: + +* `urllib.request.AbstractHTTPHandler.do_open` + +Collected trace data: + +* HTTP method +* requested URL + + +#### urllib3 [automatic-instrumentation-urllib3] + +Library: `urllib3` (`<3`) + +Instrumented methods: + +* `urllib3.connectionpool.HTTPConnectionPool.urlopen` + +Additionally, we instrumented vendored instances of urllib3 in the following libraries: + +* `requests` +* `botocore` + +Both libraries have "unvendored" urllib3 in more recent versions, we recommend to use the newest versions. 
+ +Collected trace data: + +* HTTP method +* requested URL + + +#### requests [automatic-instrumentation-requests] + +Library: `requests` (`<3`) + +Instrumented methods: + +* `requests.sessions.Session.send` + +Collected trace data: + +* HTTP method +* requested URL + + +#### AIOHTTP Client [automatic-instrumentation-aiohttp-client] + +Library: `aiohttp` (`>=3,<4`) + +Instrumented methods: + +* `aiohttp.client.ClientSession._request` + +Collected trace data: + +* HTTP method +* requested URL + + +#### httpx [automatic-instrumentation-httpx] + +Library: `httpx` (`<1`) + +Instrumented methods: + +* `httpx.Client.send` + +Collected trace data: + +* HTTP method +* requested URL + + +### Services [automatic-instrumentation-services] + + +#### AWS Boto3 / Botocore [automatic-instrumentation-boto3] + +Library: `boto3` (`>=1.0,<2`) + +Instrumented methods: + +* `botocore.client.BaseClient._make_api_call` + +Collected trace data for all services: + +* AWS region (e.g. `eu-central-1`) +* AWS service name (e.g. `s3`) +* operation name (e.g. `ListBuckets`) + +Additionally, some services collect more specific data + + +#### AWS Aiobotocore [automatic-instrumentation-aiobotocore] + +Library: `aiobotocore` (`>=2.2.0,<3`) + +Instrumented methods: + +* `aiobotocore.client.BaseClient._make_api_call` + +Collected trace data for all services: + +* AWS region (e.g. `eu-central-1`) +* AWS service name (e.g. `s3`) +* operation name (e.g. 
`ListBuckets`) + +Additionally, some services collect more specific data + + +##### S3 [automatic-instrumentation-s3] + +* Bucket name + + +##### DynamoDB [automatic-instrumentation-dynamodb] + +* Table name + + +##### SNS [automatic-instrumentation-sns] + +* Topic name + + +##### SQS [automatic-instrumentation-sqs] + +* Queue name + + +### Template Engines [automatic-instrumentation-template-engines] + + +#### Django Template Language [automatic-instrumentation-dtl] + +Library: `Django` (see [Django](#supported-django) for supported versions) + +Instrumented methods: + +* `django.template.Template.render` + +Collected trace data: + +* template name + + +#### Jinja2 [automatic-instrumentation-jinja2] + +Library: `jinja2` + +Instrumented methods: + +* `jinja2.Template.render` + +Collected trace data: + +* template name + diff --git a/docs/reference/toc.yml b/docs/reference/toc.yml new file mode 100644 index 000000000..1cd287df6 --- /dev/null +++ b/docs/reference/toc.yml @@ -0,0 +1,35 @@ +project: 'APM Python agent reference' +toc: + - file: index.md + - file: set-up-apm-python-agent.md + children: + - file: django-support.md + - file: flask-support.md + - file: aiohttp-server-support.md + - file: tornado-support.md + - file: starlette-support.md + - file: sanic-support.md + - file: lambda-support.md + - file: azure-functions-support.md + - file: wrapper-support.md + - file: asgi-middleware.md + - file: supported-technologies.md + - file: configuration.md + - file: advanced-topics.md + children: + - file: instrumenting-custom-code.md + - file: sanitizing-data.md + - file: how-agent-works.md + - file: run-tests-locally.md + - file: api-reference.md + - file: metrics.md + - file: opentelemetry-api-bridge.md + - file: logs.md + - file: performance-tuning.md + - file: upgrading.md + children: + - file: upgrading-6-x.md + - file: upgrading-5-x.md + - file: upgrading-4-x.md + - title: Troubleshooting + crosslink: 
docs-content://troubleshoot/observability/apm-agent-python/apm-python-agent.md \ No newline at end of file diff --git a/docs/reference/tornado-support.md b/docs/reference/tornado-support.md new file mode 100644 index 000000000..6a7530b95 --- /dev/null +++ b/docs/reference/tornado-support.md @@ -0,0 +1,114 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/apm/agent/python/current/tornado-support.html +applies_to: + stack: + serverless: + observability: + product: + apm_agent_python: ga +--- + +# Tornado Support [tornado-support] + +Incorporating Elastic APM into your Tornado project only requires a few easy steps. + + +## Installation [tornado-installation] + +Install the Elastic APM agent using pip: + +```bash +$ pip install elastic-apm +``` + +or add `elastic-apm` to your project’s `requirements.txt` file. + + +## Setup [tornado-setup] + +To set up the agent, you need to initialize it with appropriate settings. + +The settings are configured either via environment variables, the application’s settings, or as initialization arguments. + +You can find a list of all available settings in the [Configuration](/reference/configuration.md) page. + +To initialize the agent for your application using environment variables: + +```python +import tornado.web +from elasticapm.contrib.tornado import ElasticAPM + +app = tornado.web.Application() +apm = ElasticAPM(app) +``` + +To configure the agent using `ELASTIC_APM` in your application’s settings: + +```python +import tornado.web +from elasticapm.contrib.tornado import ElasticAPM + +app = tornado.web.Application() +app.settings['ELASTIC_APM'] = { + 'SERVICE_NAME': '', + 'SECRET_TOKEN': '', +} +apm = ElasticAPM(app) +``` + + +## Usage [tornado-usage] + +Once you have configured the agent, it will automatically track transactions and capture uncaught exceptions within tornado. 
+ +Capture an arbitrary exception by calling [`capture_exception`](/reference/api-reference.md#client-api-capture-exception): + +```python +try: + 1 / 0 +except ZeroDivisionError: + apm.client.capture_exception() +``` + +Log a generic message with [`capture_message`](/reference/api-reference.md#client-api-capture-message): + +```python +apm.client.capture_message('hello, world!') +``` + + +## Performance metrics [tornado-performance-metrics] + +If you’ve followed the instructions above, the agent has installed our instrumentation within the base RequestHandler class in tornado.web. This will measure response times, as well as detailed performance data for all supported technologies. + +::::{note} +Due to the fact that `asyncio` drivers are usually separate from their synchronous counterparts, specific instrumentation is needed for all drivers. The support for asynchronous drivers is currently quite limited. +:::: + + + +### Ignoring specific routes [tornado-ignoring-specific-views] + +You can use the [`TRANSACTIONS_IGNORE_PATTERNS`](/reference/configuration.md#config-transactions-ignore-patterns) configuration option to ignore specific routes. The list given should be a list of regular expressions which are matched against the transaction name: + +```python +app.settings['ELASTIC_APM'] = { + # ... + 'TRANSACTIONS_IGNORE_PATTERNS': ['^GET SecretHandler', 'MainHandler'] + # ... +} +``` + +This would ignore any requests using the `GET SecretHandler` route and any requests containing `MainHandler`. + + +## Supported tornado and Python versions [supported-tornado-and-python-versions] + +A list of supported [tornado](/reference/supported-technologies.md#supported-tornado) and [Python](/reference/supported-technologies.md#supported-python) versions can be found on our [Supported Technologies](/reference/supported-technologies.md) page. 
+ +::::{note} +Elastic APM only supports `asyncio` when using Python 3.7+ +:::: + + diff --git a/docs/reference/upgrading-4-x.md b/docs/reference/upgrading-4-x.md new file mode 100644 index 000000000..40ddc547b --- /dev/null +++ b/docs/reference/upgrading-4-x.md @@ -0,0 +1,35 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/apm/agent/python/current/upgrading-4.x.html +applies_to: + stack: + serverless: + observability: + product: + apm_agent_python: ga +--- + +# Upgrading to version 4 of the agent [upgrading-4-x] + +4.0 of the Elastic APM Python Agent comes with several backwards incompatible changes. + +## APM Server 6.5 required [upgrading-4-x-apm-server] + +This version of the agent is **only compatible with APM Server 6.5+**. To upgrade, we recommend to first upgrade APM Server, and then the agent. APM Server 6.5+ is backwards compatible with versions 2.x and 3.x of the agent. + + +## Configuration options [upgrading-4-x-configuration] + +Several configuration options have been removed, or renamed + +* `flush_interval` has been removed +* the `flush_interval` and `max_queue_size` settings have been removed. +* new settings introduced: `api_request_time` and `api_request_size`. +* Some settings now require a unit for duration or size. See [size format](configuration.md#config-format-size) and [duration format](configuration.md#config-format-duration). + + +## Processors [upgrading-4-x-processors] + +The method to write processors for sanitizing events has been changed. It will now be called for every type of event (transactions, spans and errors), unless the event types are limited using a decorator. See [Sanitizing data](sanitizing-data.md) for more information. 
+ + diff --git a/docs/reference/upgrading-5-x.md b/docs/reference/upgrading-5-x.md new file mode 100644 index 000000000..cabf9616a --- /dev/null +++ b/docs/reference/upgrading-5-x.md @@ -0,0 +1,25 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/apm/agent/python/current/upgrading-5.x.html +applies_to: + stack: + serverless: + observability: + product: + apm_agent_python: ga +--- + +# Upgrading to version 5 of the agent [upgrading-5-x] + +## APM Server 7.3 required for some features [_apm_server_7_3_required_for_some_features] + +APM Server and Kibana 7.3 introduced support for collecting breakdown metrics, and central configuration of APM agents. To use these features, please update the Python agent to 5.0+ and APM Server / Kibana to 7.3+ + + +## Tags renamed to Labels [_tags_renamed_to_labels] + +To better align with other parts of the Elastic Stack and the [Elastic Common Schema](ecs://reference/index.md), we renamed "tags" to "labels", and introduced limited support for typed labels. While tag values were only allowed to be strings, label values can be strings, booleans, or numerical. + +To benefit from this change, ensure that you run at least **APM Server 6.7**, and use `elasticapm.label()` instead of `elasticapm.tag()`. The `tag()` API will continue to work as before, but emit a `DeprecationWarning`. It will be removed in 6.0 of the agent. + + diff --git a/docs/reference/upgrading-6-x.md b/docs/reference/upgrading-6-x.md new file mode 100644 index 000000000..df1c59acd --- /dev/null +++ b/docs/reference/upgrading-6-x.md @@ -0,0 +1,28 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/apm/agent/python/current/upgrading-6.x.html +applies_to: + stack: + serverless: + observability: + product: + apm_agent_python: ga +--- + +# Upgrading to version 6 of the agent [upgrading-6-x] + +## Python 2 no longer supported [_python_2_no_longer_supported] + +Please upgrade to Python 3.6+ to continue to receive regular updates. 
+ + +## `SANITIZE_FIELD_NAMES` changes [_sanitize_field_names_changes] + +If you are using a non-default `sanitize_field_names` config, please note that your entries must be surrounded with stars (e.g. `*secret*`) in order to maintain previous behavior. + + +## Tags removed (in favor of labels) [_tags_removed_in_favor_of_labels] + +Tags were deprecated in the 5.x release (in favor of labels). They have now been removed. + + diff --git a/docs/reference/upgrading.md b/docs/reference/upgrading.md new file mode 100644 index 000000000..28f424b99 --- /dev/null +++ b/docs/reference/upgrading.md @@ -0,0 +1,25 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/apm/agent/python/current/upgrading.html +applies_to: + stack: + serverless: + observability: + product: + apm_agent_python: ga +--- + +# Upgrading [upgrading] + +Upgrades between minor versions of the agent, like from 3.1 to 3.2 are always backwards compatible. Upgrades that involve a major version bump often come with some backwards incompatible changes. + +We highly recommend to always pin the version of `elastic-apm` in your `requirements.txt` or `Pipfile`. This avoids automatic upgrades to potentially incompatible versions. + + +## End of life dates [end-of-life-dates] + +We love all our products, but sometimes we must say goodbye to a release so that we can continue moving forward on future development and innovation. Our [End of life policy](https://www.elastic.co/support/eol) defines how long a given release is considered supported, as well as how long a release is considered still in active development or maintenance. 
+ + + + diff --git a/docs/reference/wrapper-support.md b/docs/reference/wrapper-support.md new file mode 100644 index 000000000..db054a598 --- /dev/null +++ b/docs/reference/wrapper-support.md @@ -0,0 +1,62 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/apm/agent/python/current/wrapper-support.html +applies_to: + stack: + serverless: + observability: + product: + apm_agent_python: preview +--- + +# Wrapper Support [wrapper-support] + +::::{warning} +This functionality is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features. +:::: + + +The following frameworks are supported using our new wrapper script for no-code-changes instrumentation: + +* Django +* Flask +* Starlette + +Please keep in mind that these instrumentations are a work in progress! We’d love to have feedback on our [issue tracker](https://github.com/elastic/apm-agent-python/issues/new/choose). + +## Usage [wrapper-usage] + +When installing the agent, an entrypoint script, `elasticapm-run` is installed as well. You can use this script to instrument your app (assuming it’s using a supported framework) without changing your code! + +```bash +$ elasticapm-run --version +elasticapm-run 6.14.0 +``` + +Alternatively, you can run the entrypoint directly: + +```bash +$ python -m elasticapm.instrumentation.wrapper --version +elasticapm-run 6.14.0 +``` + +The `elasticapm-run` script can be used to run any Python script or module: + +```bash +$ elasticapm-run flask run +$ elasticapm-run python myapp.py +``` + +Generally, config should be passed in via environment variables. 
For example, + +```bash +$ ELASTIC_APM_SERVICE_NAME=my_flask_app elasticapm-run flask run +``` + +You can also pass config options as arguments to the script: + +```bash +$ elasticapm-run --config "service_name=my_flask_app" --config "debug=true" flask run +``` + + diff --git a/docs/release-notes.asciidoc b/docs/release-notes.asciidoc deleted file mode 100644 index c8d212db6..000000000 --- a/docs/release-notes.asciidoc +++ /dev/null @@ -1,15 +0,0 @@ -:pull: https://github.com/elastic/apm-agent-python/pull/ - -[[release-notes]] -== Release notes - -All notable changes to this project will be documented here. - -* <> -* <> -* <> -* <> -* <> -* <> - -include::../CHANGELOG.asciidoc[] diff --git a/docs/release-notes/breaking-changes.md b/docs/release-notes/breaking-changes.md new file mode 100644 index 000000000..bcf8c5410 --- /dev/null +++ b/docs/release-notes/breaking-changes.md @@ -0,0 +1,35 @@ +--- +navigation_title: "Breaking changes" +applies_to: + stack: + serverless: + observability: + product: + apm_agent_python: ga +--- + +# Elastic APM Python Agent breaking changes [elastic-apm-python-agent-breaking-changes] + +Before you upgrade, carefully review the Elastic APM Python Agent breaking changes and take the necessary steps to mitigate any issues. + +% To learn how to upgrade, check out . + +% ## Next version [elastic-apm-python-agent-nextversion-breaking-changes] +% **Release date:** Month day, year + +% ::::{dropdown} Title of breaking change
% Description of the breaking change.
% For more information, check [PR #](PR link).
% **Impact**<br>
Impact of the breaking change. +% **Action**
Steps for mitigating breaking change impact. +% :::: + +## 6.0.0 [elastic-apm-python-agent-600-breaking-changes] +**Release date:** February 1, 2021 + +* Python 2.7 and 3.5 support has been deprecated. The Python agent now requires Python 3.6+. For more information, check [#1021](https://github.com/elastic/apm-agent-python/pull/1021). +* No longer collecting body for `elasticsearch-py` update and `delete_by_query`. For more information, check [#1013](https://github.com/elastic/apm-agent-python/pull/1013). +* Align `sanitize_field_names` config with the [cross-agent spec](https://github.com/elastic/apm/blob/3fa78e2a1eeea81c73c2e16e96dbf6b2e79f3c64/specs/agents/sanitization.md). If you are using a non-default `sanitize_field_names`, surrounding each of your entries with stars (e.g. `*secret*`) will retain the old behavior. For more information, check [#982](https://github.com/elastic/apm-agent-python/pull/982). +* Remove credit card sanitization for field values. This improves performance, and the security value of this check was dubious anyway. For more information, check [#982](https://github.com/elastic/apm-agent-python/pull/982). +* Remove HTTP querystring sanitization. This improves performance, and is meant to standardize behavior across the agents, as defined in [#334](https://github.com/elastic/apm/pull/334). For more information, check [#982](https://github.com/elastic/apm-agent-python/pull/982). +* Remove `elasticapm.tag()` (deprecated since 5.0.0). For more information, check [#1034](https://github.com/elastic/apm-agent-python/pull/1034). 
diff --git a/docs/release-notes/deprecations.md b/docs/release-notes/deprecations.md new file mode 100644 index 000000000..b2c14f742 --- /dev/null +++ b/docs/release-notes/deprecations.md @@ -0,0 +1,44 @@ +--- +navigation_title: "Deprecations" +applies_to: + stack: + serverless: + observability: + product: + apm_agent_python: ga +--- + +# Elastic APM Python Agent deprecations [elastic-apm-python-agent-deprecations] +Review the deprecated functionality for your Elastic APM Python Agent version. While deprecations have no immediate impact, we strongly encourage you to update your implementation after you upgrade. + +% To learn how to upgrade, check out . + +% ## Next version +% **Release date:** Month day, year + +% ::::{dropdown} Deprecation title
% Description of the deprecation.
% For more information, check [PR #](PR link).
% **Impact**<br>
Impact of deprecation. +% **Action**
Steps for mitigating deprecation impact. +% :::: + +## 6.23.0 [elastic-apm-python-agent-6230-deprecations] +**Release date:** July 30, 2024 + +* Python 3.6 support will be removed in version 7.0.0 of the agent. +* The log shipping LoggingHandler will be removed in version 7.0.0 of the agent. +* The log shipping feature in the Flask instrumentation will be removed in version 7.0.0 of the agent. +* The log shipping feature in the Django instrumentation will be removed in version 7.0.0 of the agent. +* The OpenTracing bridge will be removed in version 7.0.0 of the agent. +* Celery 4.0 support is deprecated because it’s not installable anymore with a modern pip. + +## 6.20.0 [elastic-apm-python-agent-6200-deprecations] +**Release date:** January 10, 2024 + +The log shipping LoggingHandler will be removed in version 7.0.0 of the agent. + +## 6.19.0 [elastic-apm-python-agent-6190-deprecations] +**Release date:** October 11, 2023 + +The log shipping feature in the Flask instrumentation will be removed in version 7.0.0 of the agent. \ No newline at end of file diff --git a/docs/release-notes/index.md b/docs/release-notes/index.md new file mode 100644 index 000000000..d29cec09c --- /dev/null +++ b/docs/release-notes/index.md @@ -0,0 +1,591 @@ +--- +navigation_title: "Elastic APM Python Agent" +mapped_pages: + - https://www.elastic.co/guide/en/apm/agent/python/current/release-notes-6.x.html + - https://www.elastic.co/guide/en/apm/agent/python/current/release-notes.html +applies_to: + stack: + serverless: + observability: + product: + apm_agent_python: ga +--- + +# Elastic APM Python Agent release notes [elastic-apm-python-agent-release-notes] + +Review the changes, fixes, and more in each version of Elastic APM Python Agent. + +To check for security updates, go to [Security announcements for the Elastic stack](https://discuss.elastic.co/c/announcements/security-announcements/31). + +% Release notes includes only features, enhancements, and fixes. 
Add breaking changes, deprecations, and known issues to the applicable release notes sections. + +% ## version.next [elastic-apm-python-agent-versionext-release-notes] +% **Release date:** Month day, year + +% ### Features and enhancements [elastic-apm-python-agent-versionext-features-enhancements] + +% ### Fixes [elastic-apm-python-agent-versionext-fixes] + +## 6.25.0 [elastic-apm-python-agent-6250-release-notes] +**Release date:** December 23, 2025 + +### Features and enhancements [elastic-apm-python-agent-6250-features-enhancements] + +* Introduce `ELASTIC_APM_SKIP_SERVER_INFO` to reduce overhead on serverless with APM server 8.7.1+ [#2516](https://github.com/elastic/apm-agent-python/pull/2516) +* List all exported symbols in elasticapm module `__all__` [#2504](https://github.com/elastic/apm-agent-python/pull/2504) + +### Fixes [elastic-apm-python-agent-6250-fixes] + +* Handle Tornado 6.5.3 `HttpHeaders` `in` operator behavior change [#2512](https://github.com/elastic/apm-agent-python/pull/2512) + +## 6.24.1 [elastic-apm-python-agent-6241-release-notes] +**Release date:** November 21, 2025 + +### Fixes [elastic-apm-python-agent-6241-fixes] + +* Fix handling of psycopg ServerCursor and AsyncServerCursor instrumentation [#2489](https://github.com/elastic/apm-agent-python/pull/2489) +* Fix contrib/opentelemetry `set_status` to match base signature [#2457](https://github.com/elastic/apm-agent-python/pull/2457) + +## 6.24.0 [elastic-apm-python-agent-6240-release-notes] +**Release date:** August 12, 2025 + +### Features and enhancements [elastic-apm-python-agent-6240-features-enhancements] +* Add support for recent sanic versions [#2190](https://github.com/elastic/apm-agent-python/pull/2190), [#2194](https://github.com/elastic/apm-agent-python/pull/2194) +* Make server certificate verification mandatory in fips mode [#2227](https://github.com/elastic/apm-agent-python/pull/2227) +* Add support Python 3.13 [#2216](https://github.com/elastic/apm-agent-python/pull/2216) 
+* Add support for azure-data-tables package for azure instrumentation [#2187](https://github.com/elastic/apm-agent-python/pull/2187) +* Add span links from SNS messages [#2363](https://github.com/elastic/apm-agent-python/pull/2363) + +### Fixes [elastic-apm-python-agent-6240-fixes] +* Fix psycopg2 cursor execute and executemany signatures [#2331](https://github.com/elastic/apm-agent-python/pull/2331) +* Fix psycopg cursor execute and executemany signatures [#2332](https://github.com/elastic/apm-agent-python/pull/2332) +* Fix asgi middleware distributed tracing [#2334](https://github.com/elastic/apm-agent-python/pull/2334) +* Fix typing of start in Span / capture_span to float [#2335](https://github.com/elastic/apm-agent-python/pull/2335) +* Fix azure instrumentation client_class and metrics sets invocation [#2337](https://github.com/elastic/apm-agent-python/pull/2337) +* Fix mysql_connector instrumentation connection retrieval [#2344](https://github.com/elastic/apm-agent-python/pull/2344) +* Remove spurious Django QuerySet evaluation in case of database errors [#2158](https://github.com/elastic/apm-agent-python/pull/2158) + +## 6.23.0 [elastic-apm-python-agent-6230-release-notes] +**Release date:** July 30, 2024 + +### Features and enhancements [elastic-apm-python-agent-6230-features-enhancements] +* Make published Docker images multi-platform with the addition of linux/arm64 [#2080](https://github.com/elastic/apm-agent-python/pull/2080) + +### Fixes [elastic-apm-python-agent-6230-fixes] +* Fix handling consumer iteration if transaction not sampled in kafka instrumentation [#2075](https://github.com/elastic/apm-agent-python/pull/2075) +* Fix race condition with urllib3 at shutdown [#2085](https://github.com/elastic/apm-agent-python/pull/2085) +* Fix compatibility with setuptools>=72 that removed test command [#2090](https://github.com/elastic/apm-agent-python/pull/2090) + +## 6.22.3 [elastic-apm-python-agent-6223-release-notes] +**Release date:** June 10, 2024 + 
+### Fixes [elastic-apm-python-agent-6223-fixes] +* Fix outcome in ASGI and Starlette apps on error status codes without an exception [#2060](https://github.com/elastic/apm-agent-python/pull/2060) + +## 6.22.2 [elastic-apm-python-agent-6222-release-notes] +**Release date:** May 20, 2024 + +### Fixes [elastic-apm-python-agent-6222-fixes] +* Fix CI release workflow [#2046](https://github.com/elastic/apm-agent-python/pull/2046) + +## 6.22.1 [elastic-apm-python-agent-6221-release-notes] +**Release date:** May 17, 2024 + +### Features and enhancements [elastic-apm-python-agent-6221-features-enhancements] +* Relax wrapt dependency to only exclude 1.15.0 [#2005](https://github.com/elastic/apm-agent-python/pull/2005) + +## 6.22.0 [elastic-apm-python-agent-6220-release-notes] +**Release date:** April 3, 2024 + +### Features and enhancements [elastic-apm-python-agent-6220-features-enhancements] +* Add ability to override default JSON serialization [#2018](https://github.com/elastic/apm-agent-python/pull/2018) + +## 6.21.4 [elastic-apm-python-agent-6214-release-notes] +**Release date:** March 19, 2024 + +### Fixes [elastic-apm-python-agent-6214-fixes] +* Fix urllib3 2.0.1+ crash with many args [#2002](https://github.com/elastic/apm-agent-python/pull/2002) + +## 6.21.3 [elastic-apm-python-agent-6213-release-notes] +**Release date:** March 8, 2024 + +### Fixes [elastic-apm-python-agent-6213-fixes] +* Fix artifacts download in CI workflows [#1996](https://github.com/elastic/apm-agent-python/pull/1996) + +## 6.21.2 [elastic-apm-python-agent-6212-release-notes] +**Release date:** March 7, 2024 + +### Fixes [elastic-apm-python-agent-6212-fixes] +* Fix artifacts upload in CI build-distribution workflow [#1993](https://github.com/elastic/apm-agent-python/pull/1993) + +## 6.21.1 [elastic-apm-python-agent-6211-release-notes] +**Release date:** March 7, 2024 + +### Fixes [elastic-apm-python-agent-6211-fixes] +* Fix CI release workflow 
[#1990](https://github.com/elastic/apm-agent-python/pull/1990) + +## 6.21.0 [elastic-apm-python-agent-6210-release-notes] +**Release date:** March 6, 2024 + +### Fixes [elastic-apm-python-agent-6210-fixes] +* Fix starlette middleware setup without client argument [#1952](https://github.com/elastic/apm-agent-python/pull/1952) +* Fix blocking of gRPC stream-to-stream requests [#1967](https://github.com/elastic/apm-agent-python/pull/1967) +* Always take into account body reading time for starlette requests [#1970](https://github.com/elastic/apm-agent-python/pull/1970) +* Make urllib3 transport tests more robust against local env [#1969](https://github.com/elastic/apm-agent-python/pull/1969) +* Clarify starlette integration documentation [#1956](https://github.com/elastic/apm-agent-python/pull/1956) +* Make dbapi2 query scanning for dollar quotes a bit more correct [#1976](https://github.com/elastic/apm-agent-python/pull/1976) +* Normalize headers in AWS Lambda integration on API Gateway v1 requests [#1982](https://github.com/elastic/apm-agent-python/pull/1982) + +## 6.20.0 [elastic-apm-python-agent-6200-release-notes] +**Release date:** January 10, 2024 + +### Features and enhancements [elastic-apm-python-agent-6200-features-enhancements] +* Async support for dbapi2 (starting with psycopg) [#1944](https://github.com/elastic/apm-agent-python/pull/1944) +* Add object name to procedure call spans in dbapi2 [#1938](https://github.com/elastic/apm-agent-python/pull/1938) +* Add support for python 3.10 and 3.11 lambda runtimes + +### Fixes [elastic-apm-python-agent-6200-fixes] +* Fix asyncpg support for 0.29+ [#1935](https://github.com/elastic/apm-agent-python/pull/1935) +* Fix dbapi2 signature extraction to handle square brackets in table name [#1947](https://github.com/elastic/apm-agent-python/pull/1947) + +## 6.19.0 [elastic-apm-python-agent-6190-release-notes] +**Release date:** October 11, 2023 + +### Features and enhancements 
[elastic-apm-python-agent-6190-features-enhancements] +* Add Python 3.12 support +* Collect the `configured_hostname` and `detected_hostname` separately, and switch to FQDN for the `detected_hostname`. [#1891](https://github.com/elastic/apm-agent-python/pull/1891) +* Improve postgres dollar-quote detection to be much faster [#1905](https://github.com/elastic/apm-agent-python/pull/1905) + +### Fixes [elastic-apm-python-agent-6190-fixes] +* Fix url argument fetching in aiohttp_client instrumentation [#1890](https://github.com/elastic/apm-agent-python/pull/1890) +* Fix a bug in the AWS Lambda instrumentation when `event["headers"] is None` [#1907](https://github.com/elastic/apm-agent-python/pull/1907) +* Fix a bug in AWS Lambda where metadata could be incomplete, causing validation errors with the APM Server [#1914](https://github.com/elastic/apm-agent-python/pull/1914) +* Fix a bug in AWS Lambda where sending the partial transaction would be recorded as an extra span [#1914](https://github.com/elastic/apm-agent-python/pull/1914) + +## 6.18.0 [elastic-apm-python-agent-6180-release-notes] +**Release date:** July 25, 2023 + +### Features and enhancements [elastic-apm-python-agent-6180-features-enhancements] +* Add support for grpc aio server interceptor [#1870](https://github.com/elastic/apm-agent-python/pull/1870) + +### Fixes [elastic-apm-python-agent-6180-fixes] +* Fix a bug in the Elasticsearch client instrumentation which was causing loss of database context (including statement) when interacting with Elastic Cloud [#1878](https://github.com/elastic/apm-agent-python/pull/1878) + +## 6.17.0 [elastic-apm-python-agent-6170-release-notes] +**Release date:** July 3, 2023 + +### Features and enhancements [elastic-apm-python-agent-6170-features-enhancements] +* Add `server_ca_cert_file` option to provide custom CA certificate [#1852](https://github.com/elastic/apm-agent-python/pull/1852) +* Add `include_process_args` option to allow users to opt-in to collecting process 
args [#1867](https://github.com/elastic/apm-agent-python/pull/1867) + +### Fixes [elastic-apm-python-agent-6170-fixes] +* Fix a bug in the GRPC instrumentation when reaching the maximum amount of spans per transaction [#1861](https://github.com/elastic/apm-agent-python/pull/1861) + +## 6.16.2 [elastic-apm-python-agent-6162-release-notes] +**Release date:** June 12, 2023 + +### Fixes [elastic-apm-python-agent-6162-fixes] +* Fix compatibility issue with older versions of OpenSSL in lambda runtimes [#1847](https://github.com/elastic/apm-agent-python/pull/1847) +* Add `latest` tag to docker images [#1848](https://github.com/elastic/apm-agent-python/pull/1848) +* Fix issue with redacting `user:pass` in URLs in Python 3.11.4 [#1850](https://github.com/elastic/apm-agent-python/pull/1850) + +## 6.16.1 [elastic-apm-python-agent-6161-release-notes] +**Release date:** June 6, 2023 + +### Fixes [elastic-apm-python-agent-6161-fixes] +* Fix release process for docker and the lambda layer [#1845](https://github.com/elastic/apm-agent-python/pull/1845) + +## 6.16.0 [elastic-apm-python-agent-6160-release-notes] +**Release date:** June 5, 2023 + +### Features and enhancements [elastic-apm-python-agent-6160-features-enhancements] +* Add lambda layer for instrumenting AWS Lambda functions [#1826](https://github.com/elastic/apm-agent-python/pull/1826) +* Implement instrumentation of Azure Functions [#1766](https://github.com/elastic/apm-agent-python/pull/1766) +* Add support for Django to wrapper script [#1780](https://github.com/elastic/apm-agent-python/pull/1780) +* Add support for Starlette to wrapper script [#1830](https://github.com/elastic/apm-agent-python/pull/1830) +* Add `transport_json_serializer` configuration option [#1777](https://github.com/elastic/apm-agent-python/pull/1777) +* Add S3 bucket and key name to OTel attributes [#1790](https://github.com/elastic/apm-agent-python/pull/1790) +* Implement partial transaction support in AWS lambda 
[#1784](https://github.com/elastic/apm-agent-python/pull/1784) +* Add instrumentation for redis.asyncio [#1807](https://github.com/elastic/apm-agent-python/pull/1807) +* Add support for urllib3 v2.0.1+ [#1822](https://github.com/elastic/apm-agent-python/pull/1822) +* Add `service.environment` to log correlation [#1833](https://github.com/elastic/apm-agent-python/pull/1833) +* Add `ecs_logging` as a dependency [#1840](https://github.com/elastic/apm-agent-python/pull/1840) +* Add support for synchronous psycopg3 [#1841](https://github.com/elastic/apm-agent-python/pull/1841) + +### Fixes [elastic-apm-python-agent-6160-fixes] +* Fix spans being dropped if they don’t have a name [#1770](https://github.com/elastic/apm-agent-python/pull/1770) +* Fix AWS Lambda support when `event` is not a dict [#1775](https://github.com/elastic/apm-agent-python/pull/1775) +* Fix deprecation warning with urllib3 2.0.0 pre-release versions [#1778](https://github.com/elastic/apm-agent-python/pull/1778) +* Fix `activation_method` to only send to APM server 8.7.1+ [#1787](https://github.com/elastic/apm-agent-python/pull/1787) +* Fix span.context.destination.service.resource for S3 spans to have an "s3/" prefix. [#1783](https://github.com/elastic/apm-agent-python/pull/1783) + +**Note**: While this is considered a bugfix, it can potentially be a breaking change in the Kibana APM app: It can break the history of the S3-Spans / metrics for users relying on `context.destination.service.resource`. If users happen to run agents both with and without this fix (for same or different languages), the same S3-buckets can appear twice in the service map (with and without s3-prefix). 
+ +* Fix instrumentation to not bubble up exceptions during instrumentation [#1791](https://github.com/elastic/apm-agent-python/pull/1791) +* Fix HTTP transport to not print useless and confusing stack trace [#1809](https://github.com/elastic/apm-agent-python/pull/1809) + +## 6.15.1 [elastic-apm-python-agent-6151-release-notes] +**Release date:** March 6, 2023 + +### Fixes [elastic-apm-python-agent-6151-fixes] +* Fix issue with botocore instrumentation creating spans with an incorrect `service.name` [#1765](https://github.com/elastic/apm-agent-python/pull/1765) +* Fix a bug in the GRPC instrumentation when the agent is disabled or not recording [#1761](https://github.com/elastic/apm-agent-python/pull/1761) + +## 6.15.0 [elastic-apm-python-agent-6150-release-notes] +**Release date:** February 16, 2023 + +### Features and enhancements [elastic-apm-python-agent-6150-features-enhancements] +* Add `service.agent.activation_method` to the metadata [#1743](https://github.com/elastic/apm-agent-python/pull/1743) + +### Fixes [elastic-apm-python-agent-6150-fixes] +* Small fix to underlying Starlette logic to prevent duplicate Client objects [#1735](https://github.com/elastic/apm-agent-python/pull/1735) +* Change `server_url` default to `http://127.0.0.1:8200` to avoid ipv6 ambiguity [#1744](https://github.com/elastic/apm-agent-python/pull/1744) +* Fix an issue in GRPC instrumentation with unsampled transactions [#1740](https://github.com/elastic/apm-agent-python/pull/1740) +* Fix error in async Elasticsearch instrumentation when spans are dropped [#1758](https://github.com/elastic/apm-agent-python/pull/1758) + +## 6.14.0 [elastic-apm-python-agent-6140-release-notes] +**Release date:** January 30, 2023 + +### Features and enhancements [elastic-apm-python-agent-6140-features-enhancements] +* GRPC support [#1703](https://github.com/elastic/apm-agent-python/pull/1703) +* Wrapper script Flask support (experimental) [#1709](https://github.com/elastic/apm-agent-python/pull/1709) + 
+### Fixes [elastic-apm-python-agent-6140-fixes] +* Fix an async issue with long elasticsearch queries [#1725](https://github.com/elastic/apm-agent-python/pull/1725) +* Fix a minor inconsistency with the W3C tracestate spec [#1728](https://github.com/elastic/apm-agent-python/pull/1728) +* Fix a cold start performance issue with our AWS Lambda integration [#1727](https://github.com/elastic/apm-agent-python/pull/1727) +* Mark `**kwargs` config usage in our AWS Lambda integration as deprecated [#1727](https://github.com/elastic/apm-agent-python/pull/1727) + +## 6.13.2 [elastic-apm-python-agent-6132-release-notes] +**Release date:** November 17, 2022 + +### Fixes [elastic-apm-python-agent-6132-fixes] +* Fix error in Elasticsearch instrumentation when spans are dropped [#1690](https://github.com/elastic/apm-agent-python/pull/1690) +* Lower log level for errors in APM Server version fetching [#1692](https://github.com/elastic/apm-agent-python/pull/1692) +* Fix for missing parent.id when logging from a DroppedSpan under a leaf span [#1695](https://github.com/elastic/apm-agent-python/pull/1695) + +## 6.13.1 [elastic-apm-python-agent-6131-release-notes] +**Release date:** November 3, 2022 + +### Fixes [elastic-apm-python-agent-6131-fixes] +* Fix elasticsearch instrumentation for track_total_hits=False [#1687](https://github.com/elastic/apm-agent-python/pull/1687) + +## 6.13.0 [elastic-apm-python-agent-6130-release-notes] +**Release date:** October 26, 2022 + +### Features and enhancements [elastic-apm-python-agent-6130-features-enhancements] +* Add support for Python 3.11 +* Add backend granularity data to SQL backends as well as Cassandra and pymongo [#1585](https://github.com/elastic/apm-agent-python/pull/1585), [#1639](https://github.com/elastic/apm-agent-python/pull/1639) +* Add support for instrumenting the Elasticsearch 8 Python client [#1642](https://github.com/elastic/apm-agent-python/pull/1642) +* Add `*principal*` to default `sanitize_field_names` configuration 
[#1664](https://github.com/elastic/apm-agent-python/pull/1664) +* Add docs and better support for custom metrics, including in AWS Lambda [#1643](https://github.com/elastic/apm-agent-python/pull/1643) +* Add support for capturing span links from AWS SQS in AWS Lambda [#1662](https://github.com/elastic/apm-agent-python/pull/1662) + +### Fixes [elastic-apm-python-agent-6130-fixes] +* Fix Django’s `manage.py check` when agent is disabled [#1632](https://github.com/elastic/apm-agent-python/pull/1632) +* Fix an issue with long body truncation for Starlette [#1635](https://github.com/elastic/apm-agent-python/pull/1635) +* Fix an issue with transaction outcomes in Flask for uncaught exceptions [#1637](https://github.com/elastic/apm-agent-python/pull/1637) +* Fix Starlette instrumentation to make sure transaction information is still present during exception handling [#1674](https://github.com/elastic/apm-agent-python/pull/1674) + +## 6.12.0 [elastic-apm-python-agent-6120-release-notes] +**Release date:** September 7, 2022 + +### Features and enhancements [elastic-apm-python-agent-6120-features-enhancements] +* Add redis query to context data for redis instrumentation [#1406](https://github.com/elastic/apm-agent-python/pull/1406) +* Add AWS request ID to all botocore spans (at `span.context.http.request.id`) [#1625](https://github.com/elastic/apm-agent-python/pull/1625) + +### Fixes [elastic-apm-python-agent-6120-fixes] +* Differentiate Lambda URLs from API Gateway in AWS Lambda integration [#1609](https://github.com/elastic/apm-agent-python/pull/1609) +* Restrict the size of Django request bodies to prevent APM Server rejection [#1610](https://github.com/elastic/apm-agent-python/pull/1610) +* Restrict length of `exception.message` for exceptions captured by the agent [#1619](https://github.com/elastic/apm-agent-python/pull/1619) +* Restrict length of Starlette request bodies [#1549](https://github.com/elastic/apm-agent-python/pull/1549) +* Fix error when using 
elasticsearch(sniff_on_start=True) [#1618](https://github.com/elastic/apm-agent-python/pull/1618) +* Improve handling of ignored URLs and capture_body=off for Starlette [#1549](https://github.com/elastic/apm-agent-python/pull/1549) +* Fix possible error in the transport flush for Lambda functions [#1628](https://github.com/elastic/apm-agent-python/pull/1628) + +## 6.11.0 [elastic-apm-python-agent-6110-release-notes] +**Release date:** August 9, 2022 + +### Features and enhancements [elastic-apm-python-agent-6110-features-enhancements] +* Added lambda support for ELB triggers [#1605](https://github.com/elastic/apm-agent-python/pull/1605) + +## 6.10.2 [elastic-apm-python-agent-6102-release-notes] +**Release date:** August 9, 2022 + +### Fixes [elastic-apm-python-agent-6102-fixes] +* Fixed an issue with non-integer ports in Django [#1590](https://github.com/elastic/apm-agent-python/pull/1590) +* Fixed an issue with non-integer ports in Redis [#1591](https://github.com/elastic/apm-agent-python/pull/1591) +* Fixed a performance issue for local variable shortening via `varmap()` [#1593](https://github.com/elastic/apm-agent-python/pull/1593) +* Fixed `elasticapm.label()` when a Client object is not available [#1596](https://github.com/elastic/apm-agent-python/pull/1596) + +## 6.10.1 [elastic-apm-python-agent-6101-release-notes] +**Release date:** June 30, 2022 + +### Fixes [elastic-apm-python-agent-6101-fixes] +* Fix an issue with Kafka instrumentation and unsampled transactions [#1579](https://github.com/elastic/apm-agent-python/pull/1579) + +## 6.10.0 [elastic-apm-python-agent-6100-release-notes] +**Release date:** June 22, 2022 + +### Features and enhancements [elastic-apm-python-agent-6100-features-enhancements] +* Add instrumentation for [`aiobotocore`](https://github.com/aio-libs/aiobotocore) [#1520](https://github.com/elastic/apm-agent-python/pull/1520) +* Add instrumentation for [`kafka-python`](https://kafka-python.readthedocs.io/en/master/) 
[#1555](https://github.com/elastic/apm-agent-python/pull/1555) +* Add API for span links, and implement span link support for OpenTelemetry bridge [#1562](https://github.com/elastic/apm-agent-python/pull/1562) +* Add span links to SQS ReceiveMessage call [#1575](https://github.com/elastic/apm-agent-python/pull/1575) +* Add specific instrumentation for SQS delete/batch-delete [#1567](https://github.com/elastic/apm-agent-python/pull/1567) +* Add `trace_continuation_strategy` setting [#1564](https://github.com/elastic/apm-agent-python/pull/1564) + +### Fixes [elastic-apm-python-agent-6100-fixes] +* Fix return for `opentelemetry.Span.is_recording()` [#1530](https://github.com/elastic/apm-agent-python/pull/1530) +* Fix error logging for bad SERVICE_NAME config [#1546](https://github.com/elastic/apm-agent-python/pull/1546) +* Do not instrument old versions of Tornado > 6.0 due to incompatibility [#1566](https://github.com/elastic/apm-agent-python/pull/1566) +* Fix transaction names for class based views in Django 4.0+ [#1571](https://github.com/elastic/apm-agent-python/pull/1571) +* Fix a problem with our logging handler failing to report internal errors in its emitter [#1568](https://github.com/elastic/apm-agent-python/pull/1568) + +## 6.9.1 [elastic-apm-python-agent-691-release-notes] +**Release date:** March 30, 2022 + +### Fixes [elastic-apm-python-agent-691-fixes] +* Fix `otel_attributes`-related regression with older versions of APM Server (<7.16) [#1510](https://github.com/elastic/apm-agent-python/pull/1510) + +## 6.9.0 [elastic-apm-python-agent-690-release-notes] +**Release date:** March 29, 2022 + +### Features and enhancements [elastic-apm-python-agent-690-features-enhancements] +* Add OpenTelemetry API bridge [#1411](https://github.com/elastic/apm-agent-python/pull/1411) +* Change default for `sanitize_field_names` to sanitize `*auth*` instead of `authorization` [#1494](https://github.com/elastic/apm-agent-python/pull/1494) +* Add 
`span_stack_trace_min_duration` to replace deprecated `span_frames_min_duration` [#1498](https://github.com/elastic/apm-agent-python/pull/1498) +* Enable exact_match span compression by default [#1504](https://github.com/elastic/apm-agent-python/pull/1504) +* Allow parent celery tasks to specify the downstream `parent_span_id` in celery headers [#1500](https://github.com/elastic/apm-agent-python/pull/1500) + +### Fixes [elastic-apm-python-agent-690-fixes] +* Fix Sanic integration to properly respect the `capture_body` config [#1485](https://github.com/elastic/apm-agent-python/pull/1485) +* Lambda fixes to align with the cross-agent spec [#1489](https://github.com/elastic/apm-agent-python/pull/1489) +* Lambda fix for custom `service_name` [#1493](https://github.com/elastic/apm-agent-python/pull/1493) +* Change default for `stack_trace_limit` from 500 to 50 [#1492](https://github.com/elastic/apm-agent-python/pull/1492) +* Switch all duration handling to use `datetime.timedelta` objects [#1488](https://github.com/elastic/apm-agent-python/pull/1488) + +## 6.8.1 [elastic-apm-python-agent-681-release-notes] +**Release date:** March 9, 2022 + +### Fixes [elastic-apm-python-agent-681-fixes] +* Fix `exit_span_min_duration` and disable by default [#1483](https://github.com/elastic/apm-agent-python/pull/1483) + +## 6.8.0 [elastic-apm-python-agent-680-release-notes] +**Release date:** February 22, 2022 + +### Features and enhancements [elastic-apm-python-agent-680-features-enhancements] +* use "unknown-python-service" as default service name if no service name is configured [#1438](https://github.com/elastic/apm-agent-python/pull/1438) +* add transaction name to error objects [#1441](https://github.com/elastic/apm-agent-python/pull/1441) +* don’t send unsampled transactions to APM Server 8.0+ [#1442](https://github.com/elastic/apm-agent-python/pull/1442) +* implement snapshotting of certain configuration during transaction lifetime 
[#1431](https://github.com/elastic/apm-agent-python/pull/1431) +* propagate traceparent IDs via Celery [#1371](https://github.com/elastic/apm-agent-python/pull/1371) +* removed Python 2 compatibility shims [#1463](https://github.com/elastic/apm-agent-python/pull/1463) + +**Note:** Python 2 support was already removed with version 6.0 of the agent, this now removes unused compatibility shims. + +### Fixes [elastic-apm-python-agent-680-fixes] +* fix span compression for redis, mongodb, cassandra and memcached [#1444](https://github.com/elastic/apm-agent-python/pull/1444) +* fix recording of status_code for starlette [#1466](https://github.com/elastic/apm-agent-python/pull/1466) +* fix aioredis span context handling [#1462](https://github.com/elastic/apm-agent-python/pull/1462) + +## 6.7.2 [elastic-apm-python-agent-672-release-notes] +**Release date:** December 7, 2021 + +### Fixes [elastic-apm-python-agent-672-fixes] +* fix AttributeError in sync instrumentation of httpx [#1423](https://github.com/elastic/apm-agent-python/pull/1423) +* add setting to disable span compression, default to disabled [#1429](https://github.com/elastic/apm-agent-python/pull/1429) + +## 6.7.1 [elastic-apm-python-agent-671-release-notes] +**Release date:** November 29, 2021 + +### Fixes [elastic-apm-python-agent-671-fixes] +* fix an issue with Sanic exception tracking [#1414](https://github.com/elastic/apm-agent-python/pull/1414) +* asyncpg: Limit SQL queries in context data to 10000 characters [#1416](https://github.com/elastic/apm-agent-python/pull/1416) + +## 6.7.0 [elastic-apm-python-agent-670-release-notes] +**Release date:** November 17, 2021 + +### Features and enhancements [elastic-apm-python-agent-670-features-enhancements] +* Add support for Sanic framework [#1390](https://github.com/elastic/apm-agent-python/pull/1390) + +### Fixes [elastic-apm-python-agent-670-fixes] +* fix compatibility issues with httpx 0.21 [#1403](https://github.com/elastic/apm-agent-python/pull/1403) +* fix 
`span_compression_exact_match_max_duration` default value [#1407](https://github.com/elastic/apm-agent-python/pull/1407) + +## 6.6.3 [elastic-apm-python-agent-663-release-notes] +**Release date:** November 15, 2021 + +### Fixes [elastic-apm-python-agent-663-fixes] +* fix an issue with `metrics_sets` configuration referencing the `TransactionMetricSet` removed in 6.6.2 [#1397](https://github.com/elastic/apm-agent-python/pull/1397) + +## 6.6.2 [elastic-apm-python-agent-662-release-notes] +**Release date:** November 10, 2021 + +### Fixes [elastic-apm-python-agent-662-fixes] +* Fix an issue where compressed spans would count against `transaction_max_spans` [#1377](https://github.com/elastic/apm-agent-python/pull/1377) +* Make sure HTTP connections are not re-used after a process fork [#1374](https://github.com/elastic/apm-agent-python/pull/1374) +* Fix an issue with psycopg2 instrumentation when multiple hosts are defined [#1386](https://github.com/elastic/apm-agent-python/pull/1386) +* Update the `User-Agent` header to the new [spec](https://github.com/elastic/apm/pull/514) [#1378](https://github.com/elastic/apm-agent-python/pull/1378) +* Improve status_code handling in AWS Lambda integration [#1382](https://github.com/elastic/apm-agent-python/pull/1382) +* Fix `aiohttp` exception handling to allow for non-500 responses including `HTTPOk` [#1384](https://github.com/elastic/apm-agent-python/pull/1384) +* Force transaction names to strings [#1389](https://github.com/elastic/apm-agent-python/pull/1389) +* Remove unused `http.request.socket.encrypted` context field [#1332](https://github.com/elastic/apm-agent-python/pull/1332) +* Remove unused transaction metrics (APM Server handles these metrics instead) [#1388](https://github.com/elastic/apm-agent-python/pull/1388) + +## 6.6.1 [elastic-apm-python-agent-661-release-notes] +**Release date:** November 2, 2021 + +### Fixes [elastic-apm-python-agent-661-fixes] +* Fix some context fields and metadata handling in AWS Lambda 
support [#1368](https://github.com/elastic/apm-agent-python/pull/1368) + +## 6.6.0 [elastic-apm-python-agent-660-release-notes] +**Release date:** October 18, 2021 + +### Features and enhancements [elastic-apm-python-agent-660-features-enhancements] +* Add experimental support for AWS lambda instrumentation [#1193](https://github.com/elastic/apm-agent-python/pull/1193) +* Add support for span compression [#1321](https://github.com/elastic/apm-agent-python/pull/1321) +* Auto-infer destination resources for easier instrumentation of new resources [#1359](https://github.com/elastic/apm-agent-python/pull/1359) +* Add support for dropped span statistics [#1327](https://github.com/elastic/apm-agent-python/pull/1327) + +### Fixes [elastic-apm-python-agent-660-fixes] +* Ensure that Prometheus histograms are encoded correctly for APM Server [#1354](https://github.com/elastic/apm-agent-python/pull/1354) +* Remove problematic (and duplicate) `event.dataset` from logging integrations [#1365](https://github.com/elastic/apm-agent-python/pull/1365) +* Fix for memcache instrumentation when configured with a unix socket [#1357](https://github.com/elastic/apm-agent-python/pull/1357) + +## 6.5.0 [elastic-apm-python-agent-650-release-notes] +**Release date:** October 4, 2021 + +### Features and enhancements [elastic-apm-python-agent-650-features-enhancements] +* Add instrumentation for Azure Storage (blob/table/fileshare) and Azure Queue [#1316](https://github.com/elastic/apm-agent-python/pull/1316) + +### Fixes [elastic-apm-python-agent-650-fixes] +* Improve span coverage for `asyncpg` [#1328](https://github.com/elastic/apm-agent-python/pull/1328) +* aiohttp: Correctly pass custom client to tracing middleware [#1345](https://github.com/elastic/apm-agent-python/pull/1345) +* Fixed an issue with httpx instrumentation [#1337](https://github.com/elastic/apm-agent-python/pull/1337) +* Fixed an issue with Django 4.0 removing a private method 
[#1347](https://github.com/elastic/apm-agent-python/pull/1347) + +## 6.4.0 [elastic-apm-python-agent-640-release-notes] +**Release date:** August 31, 2021 + +### Features and enhancements [elastic-apm-python-agent-640-features-enhancements] +* Rename the experimental `log_ecs_formatting` config to `log_ecs_reformatting` [#1300](https://github.com/elastic/apm-agent-python/pull/1300) +* Add support for Prometheus histograms [#1165](https://github.com/elastic/apm-agent-python/pull/1165) + +### Fixes [elastic-apm-python-agent-640-fixes] +* Fixed cookie sanitization when Cookie is capitalized [#1301](https://github.com/elastic/apm-agent-python/pull/1301) +* Fix a bug with exception capturing for bad UUIDs [#1304](https://github.com/elastic/apm-agent-python/pull/1304) +* Fix potential errors in json serialization [#1203](https://github.com/elastic/apm-agent-python/pull/1203) +* Fix an issue with certain aioredis commands [#1308](https://github.com/elastic/apm-agent-python/pull/1308) + +## 6.3.3 [elastic-apm-python-agent-633-release-notes] +**Release date:** July 14, 2021 + +### Fixes [elastic-apm-python-agent-633-fixes] +* ensure that the elasticsearch instrumentation handles DroppedSpans correctly [#1190](https://github.com/elastic/apm-agent-python/pull/1190) + +## 6.3.2 [elastic-apm-python-agent-632-release-notes] +**Release date:** July 7, 2021 + +### Fixes [elastic-apm-python-agent-632-fixes] +* Fix handling of non-http scopes in Starlette/FastAPI middleware [#1187](https://github.com/elastic/apm-agent-python/pull/1187) + +## 6.3.1 [elastic-apm-python-agent-631-release-notes] +**Release date:** July 7, 2021 + +### Fixes [elastic-apm-python-agent-631-fixes] +* Fix issue with Starlette/FastAPI hanging on startup [#1185](https://github.com/elastic/apm-agent-python/pull/1185) + +## 6.3.0 [elastic-apm-python-agent-630-release-notes] +**Release date:** July 6, 2021 + +### Features and enhancements [elastic-apm-python-agent-630-features-enhancements] +* Add additional 
context information about elasticsearch client requests [#1108](https://github.com/elastic/apm-agent-python/pull/1108) +* Add `use_certifi` config option to allow users to disable `certifi` [#1163](https://github.com/elastic/apm-agent-python/pull/1163) + +### Fixes [elastic-apm-python-agent-630-fixes] +* Fix for Starlette 0.15.0 error collection [#1174](https://github.com/elastic/apm-agent-python/pull/1174) +* Fix for Starlette static files [#1137](https://github.com/elastic/apm-agent-python/pull/1137) + +## 6.2.3 [elastic-apm-python-agent-623-release-notes] +**Release date:** June 28, 2021 + +### Fixes [elastic-apm-python-agent-623-fixes] +* suppress the default_app_config attribute in Django 3.2+ [#1155](https://github.com/elastic/apm-agent-python/pull/1155) +* bump log level for multiple set_client calls to WARNING [#1164](https://github.com/elastic/apm-agent-python/pull/1164) +* fix issue with adding disttracing to SQS messages when dropping spans [#1170](https://github.com/elastic/apm-agent-python/pull/1170) + +## 6.2.2 [elastic-apm-python-agent-622-release-notes] +**Release date:** June 7, 2022 + +### Fixes [elastic-apm-python-agent-622-fixes] +* Fix an attribute access bug introduced in 6.2.0 [#1149](https://github.com/elastic/apm-agent-python/pull/1149) + +## 6.2.1 [elastic-apm-python-agent-621-release-notes] +**Release date:** June 3, 2021 + +### Fixes [elastic-apm-python-agent-621-fixes] +* catch and log exceptions in interval timer threads [#1145](https://github.com/elastic/apm-agent-python/pull/1145) + +## 6.2.0 [elastic-apm-python-agent-620-release-notes] +**Release date:** May 31, 2021 + +### Features and enhancements [elastic-apm-python-agent-620-features-enhancements] +* Added support for aioredis 1.x [#1082](https://github.com/elastic/apm-agent-python/pull/1082) +* Added support for aiomysql [#1107](https://github.com/elastic/apm-agent-python/pull/1107) +* Added Redis pub/sub instrumentation 
[#1129](https://github.com/elastic/apm-agent-python/pull/1129) +* Added specific instrumentation for AWS SQS [#1123](https://github.com/elastic/apm-agent-python/pull/1123) + +### Fixes [elastic-apm-python-agent-620-fixes] +* ensure metrics are flushed before agent shutdown [#1139](https://github.com/elastic/apm-agent-python/pull/1139) +* added safeguard for exceptions in processors [#1138](https://github.com/elastic/apm-agent-python/pull/1138) +* ensure sockets are closed which were opened for cloud environment detection [#1134](https://github.com/elastic/apm-agent-python/pull/1134) + +## 6.1.3 [elastic-apm-python-agent-613-release-notes] +**Release date:** April 28, 2021 + +### Fixes [elastic-apm-python-agent-613-fixes] +* added destination information to asyncpg instrumentation [#1115](https://github.com/elastic/apm-agent-python/pull/1115) +* fixed issue with collecting request meta data with Django REST Framework [#1117](https://github.com/elastic/apm-agent-python/pull/1117) +* fixed httpx instrumentation for newly released httpx 0.18.0 [#1118](https://github.com/elastic/apm-agent-python/pull/1118) + +## 6.1.2 [elastic-apm-python-agent-612-release-notes] +**Release date:** April 14, 2021 + +### Fixes [elastic-apm-python-agent-612-fixes] +* fixed issue with empty transaction name for the root route with Django [#1095](https://github.com/elastic/apm-agent-python/pull/1095) +* fixed on-the-fly initialisation of Flask apps [#1099](https://github.com/elastic/apm-agent-python/pull/1099) + +## 6.1.1 [elastic-apm-python-agent-611-release-notes] +**Release date:** April 8, 2021 + +### Fixes [elastic-apm-python-agent-611-fixes] +* fixed a validation issue with the newly introduced instrumentation for S3, SNS and DynamoDB [#1090](https://github.com/elastic/apm-agent-python/pull/1090) + +## 6.1.0 [elastic-apm-python-agent-610-release-notes] +**Release date:** March 31, 2021 + +### Features and enhancements [elastic-apm-python-agent-610-features-enhancements] +* Add global 
access to Client singleton object at `elasticapm.get_client()` [#1043](https://github.com/elastic/apm-agent-python/pull/1043) +* Add `log_ecs_formatting` config option [#1058](https://github.com/elastic/apm-agent-python/pull/1058) [#1063](https://github.com/elastic/apm-agent-python/pull/1063) +* Add instrumentation for httplib2 [#1031](https://github.com/elastic/apm-agent-python/pull/1031) +* Add better instrumentation for some AWS services (S3, SNS, DynamoDB) [#1054](https://github.com/elastic/apm-agent-python/pull/1054) +* Added beta support for collecting metrics from prometheus_client [#1083](https://github.com/elastic/apm-agent-python/pull/1083) + +### Fixes [elastic-apm-python-agent-610-fixes] +* Fix for potential `capture_body: error` hang in Starlette/FastAPI [#1038](https://github.com/elastic/apm-agent-python/pull/1038) +* Fix a rare error around processing stack frames [#1012](https://github.com/elastic/apm-agent-python/pull/1012) +* Fix for Starlette/FastAPI to correctly capture request bodies as strings [#1042](https://github.com/elastic/apm-agent-python/pull/1042) +* Fix transaction names for Starlette Mount routes [#1037](https://github.com/elastic/apm-agent-python/pull/1037) +* Fix for elastic excepthook arguments [#1050](https://github.com/elastic/apm-agent-python/pull/1050) +* Fix issue with remote configuration when resetting config values [#1068](https://github.com/elastic/apm-agent-python/pull/1068) +* Use a label for the elasticapm Django app that is compatible with Django 3.2 validation [#1064](https://github.com/elastic/apm-agent-python/pull/1064) +* Fix an issue with undefined routes in Starlette [#1076](https://github.com/elastic/apm-agent-python/pull/1076) + +## 6.0.0 [elastic-apm-python-agent-600-release-notes] +**Release date:** February 1, 2021 + +### Fixes [elastic-apm-python-agent-600-fixes] +* Fix for GraphQL span spamming from scalar fields with required flag [#1015](https://github.com/elastic/apm-agent-python/pull/1015) + + diff 
--git a/docs/release-notes/known-issues.md b/docs/release-notes/known-issues.md new file mode 100644 index 000000000..4972fdbc9 --- /dev/null +++ b/docs/release-notes/known-issues.md @@ -0,0 +1,29 @@ +--- +navigation_title: "Known issues" +applies_to: + stack: + serverless: + observability: + product: + apm_agent_python: ga +--- + +# Elastic APM Python Agent known issues [elastic-apm-python-agent-known-issues] + +Known issues are significant defects or limitations that may impact your implementation. These issues are actively being worked on and will be addressed in a future release. Review the Elastic APM Python Agent known issues to help you make informed decisions, such as upgrading to a new version. + +% Use the following template to add entries to this page. + +% :::{dropdown} Title of known issue +% **Details** +% On [Month/Day/Year], a known issue was discovered that [description of known issue]. + +% **Workaround** +% Workaround description. + +% **Resolved** +% On [Month/Day/Year], this issue was resolved. + +::: + +_No known issues_ \ No newline at end of file diff --git a/docs/release-notes/toc.yml b/docs/release-notes/toc.yml new file mode 100644 index 000000000..a41006794 --- /dev/null +++ b/docs/release-notes/toc.yml @@ -0,0 +1,5 @@ +toc: + - file: index.md + - file: known-issues.md + - file: breaking-changes.md + - file: deprecations.md \ No newline at end of file diff --git a/docs/run-tests-locally.asciidoc b/docs/run-tests-locally.asciidoc deleted file mode 100644 index fd3aa1eea..000000000 --- a/docs/run-tests-locally.asciidoc +++ /dev/null @@ -1,78 +0,0 @@ -[[run-tests-locally]] -=== Run Tests Locally - -To run tests locally you can make use of the docker images also used when running the whole test suite with Jenkins. -Running the full test suite first does some linting and then runs the actual tests with different versions of Python and different web frameworks. 
-For a full overview of the test matrix and supported versions have a look at -https://github.com/elastic/apm-agent-python/blob/main/Jenkinsfile[Jenkins Configuration]. - -[float] -[[pre-commit]] -==== Pre Commit -We run our git hooks on every commit to automatically point out issues in code. Those issues are also detected within the GitHub actions. -Please follow the installation steps stated in https://pre-commit.com/#install. - -[float] -[[coder-linter]] -==== Code Linter -We run two code linters `isort` and `flake8`. You can trigger each single one locally by running: - -[source,bash] ----- -$ pre-commit run -a isort ----- - -[source,bash] ----- -$ pre-commit run -a flake8 ----- - -[float] -[[coder-formatter]] -==== Code Formatter -We test that the code is formatted using `black`. You can trigger this check by running: - -[source,bash] ----- -$ pre-commit run -a black ----- - -[float] -[[test-documentation]] -==== Test Documentation -We test that the documentation can be generated without errors. You can trigger this check by running: -[source,bash] ----- -$ ./tests/scripts/docker/docs.sh ----- - -[float] -[[running-tests]] -==== Running Tests -We run the test suite on different combinations of Python versions and web frameworks. For triggering the test suite for a specific combination locally you can run: - -[source,bash] ----- -$ ./tests/scripts/docker/run_tests.sh python-version framework-version ----- -NOTE: The `python-version` must be of format `python-version`, e.g. `python-3.6` or `pypy-2`. -The `framework` must be of format `framework-version`, e.g. `django-1.10` or `flask-0.12`. - -You can also run the unit tests outside of docker, by installing the relevant -https://github.com/elastic/apm-agent-python/tree/main/tests/requirements[requirements file] -and then running `py.test` from the project root. 
- -==== Integration testing - -Check out https://github.com/elastic/apm-integration-testing for resources for -setting up full end-to-end testing environments. For example, to spin up -an environment with the https://github.com/basepi/opbeans-python[opbeans Django app], -with version 7.3 of the elastic stack and the apm-python-agent from your local -checkout, you might do something like this: - -[source,bash] ----- -$ ./scripts/compose.py start 7.3 \ - --with-agent-python-django --with-opbeans-python \ - --opbeans-python-agent-local-repo=~/elastic/apm-agent-python ----- diff --git a/docs/sanic.asciidoc b/docs/sanic.asciidoc deleted file mode 100644 index 83f8fd540..000000000 --- a/docs/sanic.asciidoc +++ /dev/null @@ -1,179 +0,0 @@ -[[sanic-support]] -=== Sanic Support - -Incorporating Elastic APM into your Sanic project only requires a few easy -steps. - -[float] -[[sanic-installation]] -==== Installation - -Install the Elastic APM agent using pip: - -[source,bash] ----- -$ pip install elastic-apm ----- - -or add `elastic-apm` to your project's `requirements.txt` file. - - -[float] -[[sanic-setup]] -==== Setup - -To set up the agent, you need to initialize it with appropriate settings. - -The settings are configured either via environment variables, or as -initialization arguments. - -You can find a list of all available settings in the -<> page. 
- -To initialize the agent for your application using environment variables: - -[source,python] ----- -from sanic import Sanic -from elasticapm.contrib.sanic import ElasticAPM - -app = Sanic(name="elastic-apm-sample") -apm = ElasticAPM(app=app) ----- - -To configure the agent using initialization arguments and Sanic's Configuration infrastructure: - -[source,python] ----- -# Create a file named external_config.py in your application -# If you want this module based configuration to be used for APM, prefix them with ELASTIC_APM_ -ELASTIC_APM_SERVER_URL = "https://serverurl.apm.com:443" -ELASTIC_APM_SECRET_TOKEN = "sometoken" ----- - -[source,python] ----- -from sanic import Sanic -from elasticapm.contrib.sanic import ElasticAPM - -app = Sanic(name="elastic-apm-sample") -app.config.update_config("path/to/external_config.py") -apm = ElasticAPM(app=app) ----- - -[float] -[[sanic-usage]] -==== Usage - -Once you have configured the agent, it will automatically track transactions -and capture uncaught exceptions within sanic. - -Capture an arbitrary exception by calling -<>: - -[source,python] ----- -from sanic import Sanic -from elasticapm.contrib.sanic import ElasticAPM - -app = Sanic(name="elastic-apm-sample") -apm = ElasticAPM(app=app) - -try: - 1 / 0 -except ZeroDivisionError: - apm.capture_exception() ----- - -Log a generic message with <>: - -[source,python] ----- -from sanic import Sanic -from elasticapm.contrib.sanic import ElasticAPM - -app = Sanic(name="elastic-apm-sample") -apm = ElasticAPM(app=app) - -apm.capture_message('hello, world!') ----- - -[float] -[[sanic-performance-metrics]] -==== Performance metrics - -If you've followed the instructions above, the agent has installed our -instrumentation middleware which will process all requests through your app. -This will measure response times, as well as detailed performance data for -all supported technologies. 
- -NOTE: Due to the fact that `asyncio` drivers are usually separate from their -synchronous counterparts, specific instrumentation is needed for all drivers. -The support for asynchronous drivers is currently quite limited. - -[float] -[[sanic-ignoring-specific-views]] -===== Ignoring specific routes - -You can use the -<> -configuration option to ignore specific routes. The list given should be a -list of regular expressions which are matched against the transaction name: - -[source,python] ----- -from sanic import Sanic -from elasticapm.contrib.sanic import ElasticAPM - -app = Sanic(name="elastic-apm-sample") -apm = ElasticAPM(app=app, config={ - 'TRANSACTIONS_IGNORE_PATTERNS': ['^GET /secret', '/extra_secret'], -}) ----- - -This would ignore any requests using the `GET /secret` route -and any requests containing `/extra_secret`. - -[float] -[[extended-sanic-usage]] -==== Extended Sanic APM Client Usage - -Sanic's contributed APM client also provides a few extendable way to configure selective behaviors to enhance the -information collected as part of the transactions being tracked by the APM. - -In order to enable this behavior, the APM Client middleware provides a few callback functions that you can leverage -in order to simplify the process of generating additional contexts into the traces being collected. 
-[cols="1,1,1,1"] -|=== -| Callback Name | Callback Invocation Format | Expected Return Format | Is Async - -| transaction_name_callback -| transaction_name_callback(request) -| string -| false - -| user_context_callback -| user_context_callback(request) -| (username_string, user_email_string, userid_string) -| true - -| custom_context_callback -| custom_context_callback(request) or custom_context_callback(response) -| dict(str=str) -| true - -| label_info_callback -| label_info_callback() -| dict(str=str) -| true -|=== - -[float] -[[supported-stanic-and-python-versions]] -==== Supported Sanic and Python versions - -A list of supported <> and -<> versions can be found on our -<> page. - -NOTE: Elastic APM only supports `asyncio` when using Python 3.7+ diff --git a/docs/serverless-azure-functions.asciidoc b/docs/serverless-azure-functions.asciidoc deleted file mode 100644 index b137c91c7..000000000 --- a/docs/serverless-azure-functions.asciidoc +++ /dev/null @@ -1,61 +0,0 @@ -[[azure-functions-support]] -=== Monitoring Azure Functions - -[float] -==== Prerequisites - -You need an APM Server to which you can send APM data. -Follow the {apm-guide-ref}/apm-quick-start.html[APM Quick start] if you have not set one up yet. -For the best-possible performance, we recommend setting up APM on {ecloud} in the same Azure region as your Azure Functions app. - -NOTE: Currently, only HTTP and timer triggers are supported. -Other trigger types may be captured as well, but the amount of captured contextual data may differ. - -[float] -==== Step 1: Enable Worker Extensions - -Elastic APM uses https://learn.microsoft.com/en-us/azure/azure-functions/functions-reference-python?tabs=asgi%2Capplication-level&pivots=python-mode-configuration#python-worker-extensions[Worker Extensions] -to instrument Azure Functions. -This feature is not enabled by default, and must be enabled in your Azure Functions App. 
-Please follow the instructions in the https://learn.microsoft.com/en-us/azure/azure-functions/functions-reference-python?tabs=asgi%2Capplication-level&pivots=python-mode-configuration#using-extensions[Azure docs]. - -Once you have enabled Worker Extensions, these two lines of code will enable Elastic APM's extension: - -[source,python] ----- -from elasticapm.contrib.serverless.azure import ElasticAPMExtension - -ElasticAPMExtension.configure() ----- - -Put them somewhere at the top of your Python file, before the function definitions. - -[float] -==== Step 2: Install the APM Python Agent - -You need to add `elastic-apm` as a dependency for your Functions app. -Simply add `elastic-apm` to your `requirements.txt` file. -We recommend pinning the version to the current newest version of the agent, and periodically updating the version. - -[float] -==== Step 3: Configure APM on Azure Functions - -The APM Python agent is configured through https://learn.microsoft.com/en-us/azure/azure-functions/functions-how-to-use-azure-function-app-settings?tabs=portal#settings[App Settings]. -These are then picked up by the agent as environment variables. - -For the minimal configuration, you will need the <> to set the destination for APM data and a <>. -If you prefer to use an {apm-guide-ref}/api-key.html[APM API key] instead of the APM secret token, use the <> environment variable instead of `ELASTIC_APM_SECRET_TOKEN` in the following example configuration. - -[source,bash] ----- -$ az functionapp config appsettings set --settings ELASTIC_APM_SERVER_URL=https://example.apm.northeurope.azure.elastic-cloud.com:443 -$ az functionapp config appsettings set --settings ELASTIC_APM_SECRET_TOKEN=verysecurerandomstring ----- - -You can optionally <>. - -That's it; Once the agent is installed and working, spans will be captured for -<>. You can also use -<> to capture custom spans, and -you can retrieve the `Client` object for capturing exceptions/messages -using <>. 
diff --git a/docs/serverless-lambda.asciidoc b/docs/serverless-lambda.asciidoc deleted file mode 100644 index 48c091390..000000000 --- a/docs/serverless-lambda.asciidoc +++ /dev/null @@ -1,48 +0,0 @@ -[[lambda-support]] -=== Monitoring AWS Lambda Python Functions -:layer-section-type: with-agent -:apm-aws-repo-dir: ./lambda - -The Python APM Agent can be used with AWS Lambda to monitor the execution of your AWS Lambda functions. - -[float] -==== Prerequisites - -You need an APM Server to send APM data to. Follow the {apm-guide-ref}/apm-quick-start.html[APM Quick start] if you have not set one up yet. For the best-possible performance, we recommend setting up APM on {ecloud} in the same AWS region as your AWS Lambda functions. - -[float] -==== Step 1: Select the AWS Region and Architecture - -include::{apm-aws-lambda-root}/docs/lambda-selector/lambda-attributes-selector.asciidoc[] - -[float] -==== Step 2: Add the APM Layers to your Lambda function - -include::{apm-aws-lambda-root}/docs/lambda-selector/extension-arn-replacement.asciidoc[] -include::./lambda/python-arn-replacement.asciidoc[] - -Both the {apm-lambda-ref}/aws-lambda-arch.html[{apm-lambda-ext}] and the Python APM Agent are added to your Lambda function as https://docs.aws.amazon.com/lambda/latest/dg/invocation-layers.html[AWS Lambda Layers]. Therefore, you need to add the corresponding Layer ARNs (identifiers) to your Lambda function. - -include::{apm-aws-lambda-root}/docs/add-extension/add-extension-layer-widget.asciidoc[] - -[float] -==== Step 3: Configure APM on AWS Lambda - -The {apm-lambda-ext} and the APM Python agent are configured through environment variables on the AWS Lambda function. - -For the minimal configuration, you will need the _APM Server URL_ to set the destination for APM data and an _{apm-guide-ref}/secret-token.html[APM Secret Token]_. 
-If you prefer to use an {apm-guide-ref}/api-key.html[APM API key] instead of the APM secret token, use the `ELASTIC_APM_API_KEY` environment variable instead of `ELASTIC_APM_SECRET_TOKEN` in the following configuration. - -For production environments, we recommend {apm-lambda-ref}/aws-lambda-secrets-manager.html[using the AWS Secrets Manager to store your APM authentication key] instead of providing the secret value as plaintext in the environment variables. - -include::./lambda/configure-lambda-widget.asciidoc[] -<1> The {apm-lambda-ref}/aws-lambda-config-options.html#_elastic_apm_send_strategy[`ELASTIC_APM_SEND_STRATEGY`] defines when APM data is sent to your Elastic APM backend. To reduce the execution time of your lambda functions, we recommend to use the `background` strategy in production environments with steady load scenarios. - -You can optionally <> or the {apm-lambda-ref}/aws-lambda-config-options.html[configuration of the {apm-lambda-ext}]. - -That's it. After following the steps above, you're ready to go! Your Lambda -function invocations should be traced from now on. Spans will be captured for -<>. You can also use -<> to capture custom spans, and you can -retrieve the `Client` object for capturing exceptions/messages using -<>. diff --git a/docs/set-up.asciidoc b/docs/set-up.asciidoc deleted file mode 100644 index 58e74294b..000000000 --- a/docs/set-up.asciidoc +++ /dev/null @@ -1,37 +0,0 @@ -[[set-up]] -== Set up the Agent - -To get you off the ground, we’ve prepared guides for setting up the Agent with different frameworks: - - * <> - * <> - * <> - * <> - * <> - * <> - * <> - * <> - * <> - * <> - -For custom instrumentation, see <>. 
- -include::./django.asciidoc[] - -include::./flask.asciidoc[] - -include::./aiohttp-server.asciidoc[] - -include::./tornado.asciidoc[] - -include::./starlette.asciidoc[] - -include::./sanic.asciidoc[] - -include::./serverless-lambda.asciidoc[] - -include::./serverless-azure-functions.asciidoc[] - -include::./wrapper.asciidoc[] - -include::./asgi-middleware.asciidoc[] diff --git a/docs/starlette.asciidoc b/docs/starlette.asciidoc deleted file mode 100644 index 8b82ee1f2..000000000 --- a/docs/starlette.asciidoc +++ /dev/null @@ -1,149 +0,0 @@ -[[starlette-support]] -=== Starlette/FastAPI Support - -Incorporating Elastic APM into your Starlette project only requires a few easy -steps. - -[float] -[[starlette-installation]] -==== Installation - -Install the Elastic APM agent using pip: - -[source,bash] ----- -$ pip install elastic-apm ----- - -or add `elastic-apm` to your project's `requirements.txt` file. - - -[float] -[[starlette-setup]] -==== Setup - -To set up the agent, you need to initialize it with appropriate settings. - -The settings are configured either via environment variables, or as -initialization arguments. - -You can find a list of all available settings in the -<> page. - -To initialize the agent for your application using environment variables, add -the ElasticAPM middleware to your Starlette application: - -[source,python] ----- -from starlette.applications import Starlette -from elasticapm.contrib.starlette import ElasticAPM - -app = Starlette() -app.add_middleware(ElasticAPM) ----- - -WARNING: If you are using any `BaseHTTPMiddleware` middleware, you must add them -*before* the ElasticAPM middleware. This is because `BaseHTTPMiddleware` breaks -`contextvar` propagation, as noted -https://www.starlette.io/middleware/#limitations[here]. 
- -To configure the agent using initialization arguments: - -[source,python] ----- -from starlette.applications import Starlette -from elasticapm.contrib.starlette import make_apm_client, ElasticAPM - -apm = make_apm_client({ - 'SERVICE_NAME': '', - 'SECRET_TOKEN': '', -}) -app = Starlette() -app.add_middleware(ElasticAPM, client=apm) ----- - -[float] -[[starlette-fastapi]] -==== FastAPI - -Because FastAPI supports Starlette middleware, using the agent with FastAPI -is almost exactly the same as with Starlette: - -[source,python] ----- -from fastapi import FastAPI -from elasticapm.contrib.starlette import ElasticAPM - -app = FastAPI() -app.add_middleware(ElasticAPM) ----- - -[float] -[[starlette-usage]] -==== Usage - -Once you have configured the agent, it will automatically track transactions -and capture uncaught exceptions within starlette. - -Capture an arbitrary exception by calling -<>: - -[source,python] ----- -try: - 1 / 0 -except ZeroDivisionError: - apm.client.capture_exception() ----- - -Log a generic message with <>: - -[source,python] ----- -apm.client.capture_message('hello, world!') ----- - -[float] -[[starlette-performance-metrics]] -==== Performance metrics - -If you've followed the instructions above, the agent has installed our -instrumentation middleware which will process all requests through your app. -This will measure response times, as well as detailed performance data for -all supported technologies. - -NOTE: Due to the fact that `asyncio` drivers are usually separate from their -synchronous counterparts, specific instrumentation is needed for all drivers. -The support for asynchronous drivers is currently quite limited. - -[float] -[[starlette-ignoring-specific-views]] -===== Ignoring specific routes - -You can use the -<> -configuration option to ignore specific routes. The list given should be a -list of regular expressions which are matched against the transaction name: - -[source,python] ----- -apm = make_apm_client({ - # ... 
- 'TRANSACTIONS_IGNORE_PATTERNS': ['^GET /secret', '/extra_secret'] - # ... -}) ----- - -This would ignore any requests using the `GET /secret` route -and any requests containing `/extra_secret`. - - -[float] -[[supported-starlette-and-python-versions]] -==== Supported Starlette and Python versions - -A list of supported <> and -<> versions can be found on our -<> page. - -NOTE: Elastic APM only supports `asyncio` when using Python 3.7+ diff --git a/docs/supported-technologies.asciidoc b/docs/supported-technologies.asciidoc deleted file mode 100644 index 9a3b314d1..000000000 --- a/docs/supported-technologies.asciidoc +++ /dev/null @@ -1,677 +0,0 @@ -[[supported-technologies]] -== Supported Technologies - -[[framework-support]] -The Elastic APM Python Agent comes with support for the following frameworks: - - * <> - * <> - * <> - * <> - * <> - * <> - * <> - -For other frameworks and custom Python code, the agent exposes a set of <> for integration. - -[float] -[[supported-python]] -=== Python - -The following Python versions are supported: - - * 3.6 - * 3.7 - * 3.8 - * 3.9 - * 3.10 - * 3.11 - * 3.12 - -[float] -[[supported-django]] -=== Django - -We support these Django versions: - - * 1.11 - * 2.0 - * 2.1 - * 2.2 - * 3.0 - * 3.1 - * 3.2 - * 4.0 - -For upcoming Django versions, we generally aim to ensure compatibility starting with the first Release Candidate. - -NOTE: we currently don't support Django running in ASGI mode. 
- -[float] -[[supported-flask]] -=== Flask - -We support these Flask versions: - - * 0.10 (Deprecated) - * 0.11 (Deprecated) - * 0.12 (Deprecated) - * 1.0 - * 1.1 - * 2.0 - -[float] -[[supported-aiohttp]] -=== Aiohttp Server - -We support these aiohttp versions: - - * 3.0+ - -[float] -[[supported-tornado]] -=== Tornado - -We support these tornado versions: - - * 6.0+ - - -[float] -[[supported-sanic]] -=== Sanic - -We support these sanic versions: - - * 20.12.2+ - - -[float] -[[supported-starlette]] -=== Starlette/FastAPI - -We support these Starlette versions: - - * 0.13.0+ - -Any FastAPI version which uses a supported Starlette version should also -be supported. - -[float] -[[supported-grpc]] -=== GRPC - -We support these `grpcio` versions: - - * 1.24.0+ - - -[float] -[[automatic-instrumentation]] -== Automatic Instrumentation - -The Python APM agent comes with automatic instrumentation of various 3rd party modules and standard library modules. - -[float] -[[automatic-instrumentation-scheduling]] -=== Scheduling - -[float] -[[automatic-instrumentation-scheduling-celery]] -===== Celery - -We support these Celery versions: - -* 3.x -* 4.x - -Celery tasks will be recorded automatically with Django and Flask only. 
- -[float] -[[automatic-instrumentation-db]] -=== Databases - -[float] -[[automatic-instrumentation-db-elasticsearch]] -==== Elasticsearch - -Instrumented methods: - - * `elasticsearch.transport.Transport.perform_request` - * `elasticsearch.connection.http_urllib3.Urllib3HttpConnection.perform_request` - * `elasticsearch.connection.http_requests.RequestsHttpConnection.perform_request` - * `elasticsearch._async.transport.AsyncTransport.perform_request` - * `elasticsearch_async.connection.AIOHttpConnection.perform_request` - -Additionally, the instrumentation wraps the following methods of the `Elasticsearch` client class: - - * `elasticsearch.client.Elasticsearch.delete_by_query` - * `elasticsearch.client.Elasticsearch.search` - * `elasticsearch.client.Elasticsearch.count` - * `elasticsearch.client.Elasticsearch.update` - -Collected trace data: - - * the query string (if available) - * the `query` element from the request body (if available) - * the response status code - * the count of affected rows (if available) - -We recommend using keyword arguments only with elasticsearch-py, as recommended by -https://elasticsearch-py.readthedocs.io/en/master/api.html#api-documentation[the elasticsearch-py docs]. -If you are using positional arguments, we will be unable to gather the `query` -element from the request body. - -[float] -[[automatic-instrumentation-db-sqlite]] -==== SQLite - -Instrumented methods: - - * `sqlite3.connect` - * `sqlite3.dbapi2.connect` - * `pysqlite2.dbapi2.connect` - -The instrumented `connect` method returns a wrapped connection/cursor which instruments the actual `Cursor.execute` calls. - -Collected trace data: - - * parametrized SQL query - - -[float] -[[automatic-instrumentation-db-mysql]] -==== MySQLdb - -Library: `MySQLdb` - -Instrumented methods: - - * `MySQLdb.connect` - -The instrumented `connect` method returns a wrapped connection/cursor which instruments the actual `Cursor.execute` calls. 
- -Collected trace data: - - * parametrized SQL query - -[float] -[[automatic-instrumentation-db-mysql-connector]] -==== mysql-connector - -Library: `mysql-connector-python` - -Instrumented methods: - - * `mysql.connector.connect` - -The instrumented `connect` method returns a wrapped connection/cursor which instruments the actual `Cursor.execute` calls. - -Collected trace data: - - * parametrized SQL query - -[float] -[[automatic-instrumentation-db-pymysql]] -==== pymysql - -Library: `pymysql` - -Instrumented methods: - - * `pymysql.connect` - -The instrumented `connect` method returns a wrapped connection/cursor which instruments the actual `Cursor.execute` calls. - -Collected trace data: - - * parametrized SQL query - -[float] -[[automatic-instrumentation-db-aiomysql]] -==== aiomysql - -Library: `aiomysql` - -Instrumented methods: - - * `aiomysql.cursors.Cursor.execute` - -Collected trace data: - - * parametrized SQL query - -[float] -[[automatic-instrumentation-db-postgres]] -==== PostgreSQL - -Library: `psycopg2`, `psycopg2-binary` (`>=2.9`) - -Instrumented methods: - - * `psycopg2.connect` - -The instrumented `connect` method returns a wrapped connection/cursor which instruments the actual `Cursor.execute` calls. 
- -Collected trace data: - - * parametrized SQL query - -[float] -[[automatic-instrumentation-db-aiopg]] -==== aiopg - -Library: `aiopg` (`>=1.0`) - -Instrumented methods: - - * `aiopg.cursor.Cursor.execute` - * `aiopg.cursor.Cursor.callproc` - -Collected trace data: - - * parametrized SQL query - -[float] -[[automatic-instrumentation-db-asyncg]] -==== asyncpg - -Library: `asyncpg` (`>=0.20`) - -Instrumented methods: - - * `asyncpg.connection.Connection.execute` - * `asyncpg.connection.Connection.executemany` - - -Collected trace data: - - * parametrized SQL query - -[float] -[[automatic-instrumentation-db-pyodbc]] -==== PyODBC - -Library: `pyodbc`, (`>=4.0`) - -Instrumented methods: - - * `pyodbc.connect` - -The instrumented `connect` method returns a wrapped connection/cursor which instruments the actual `Cursor.execute` calls. - -Collected trace data: - - * parametrized SQL query - -[float] -[[automatic-instrumentation-db-mssql]] -==== MS-SQL - -Library: `pymssql`, (`>=2.1.0`) - -Instrumented methods: - - * `pymssql.connect` - -The instrumented `connect` method returns a wrapped connection/cursor which instruments the actual `Cursor.execute` calls. 
- -Collected trace data: - - * parametrized SQL query - -[float] -[[automatic-instrumentation-db-mongodb]] -==== MongoDB - -Library: `pymongo`, `>=2.9,<3.8` - -Instrumented methods: - - * `pymongo.collection.Collection.aggregate` - * `pymongo.collection.Collection.bulk_write` - * `pymongo.collection.Collection.count` - * `pymongo.collection.Collection.create_index` - * `pymongo.collection.Collection.create_indexes` - * `pymongo.collection.Collection.delete_many` - * `pymongo.collection.Collection.delete_one` - * `pymongo.collection.Collection.distinct` - * `pymongo.collection.Collection.drop` - * `pymongo.collection.Collection.drop_index` - * `pymongo.collection.Collection.drop_indexes` - * `pymongo.collection.Collection.ensure_index` - * `pymongo.collection.Collection.find_and_modify` - * `pymongo.collection.Collection.find_one` - * `pymongo.collection.Collection.find_one_and_delete` - * `pymongo.collection.Collection.find_one_and_replace` - * `pymongo.collection.Collection.find_one_and_update` - * `pymongo.collection.Collection.group` - * `pymongo.collection.Collection.inline_map_reduce` - * `pymongo.collection.Collection.insert` - * `pymongo.collection.Collection.insert_many` - * `pymongo.collection.Collection.insert_one` - * `pymongo.collection.Collection.map_reduce` - * `pymongo.collection.Collection.reindex` - * `pymongo.collection.Collection.remove` - * `pymongo.collection.Collection.rename` - * `pymongo.collection.Collection.replace_one` - * `pymongo.collection.Collection.save` - * `pymongo.collection.Collection.update` - * `pymongo.collection.Collection.update_many` - * `pymongo.collection.Collection.update_one` - -Collected trace data: - - * database name - * method name - - -[float] -[[automatic-instrumentation-db-redis]] -==== Redis - -Library: `redis` (`>=2.8`) - -Instrumented methods: - - * `redis.client.Redis.execute_command` - * `redis.client.Pipeline.execute` - -Collected trace data: - - * Redis command name - - -[float] 
-[[automatic-instrumentation-db-aioredis]] -==== aioredis - -Library: `aioredis` (`<2.0`) - -Instrumented methods: - - * `aioredis.pool.ConnectionsPool.execute` - * `aioredis.commands.transaction.Pipeline.execute` - * `aioredis.connection.RedisConnection.execute` - -Collected trace data: - - * Redis command name - -[float] -[[automatic-instrumentation-db-cassandra]] -==== Cassandra - -Library: `cassandra-driver` (`>=3.4,<4.0`) - -Instrumented methods: - - * `cassandra.cluster.Session.execute` - * `cassandra.cluster.Cluster.connect` - -Collected trace data: - - * CQL query - -[float] -[[automatic-instrumentation-db-python-memcache]] -==== Python Memcache - -Library: `python-memcached` (`>=1.51`) - -Instrumented methods: - -* `memcache.Client.add` -* `memcache.Client.append` -* `memcache.Client.cas` -* `memcache.Client.decr` -* `memcache.Client.delete` -* `memcache.Client.delete_multi` -* `memcache.Client.disconnect_all` -* `memcache.Client.flush_all` -* `memcache.Client.get` -* `memcache.Client.get_multi` -* `memcache.Client.get_slabs` -* `memcache.Client.get_stats` -* `memcache.Client.gets` -* `memcache.Client.incr` -* `memcache.Client.prepend` -* `memcache.Client.replace` -* `memcache.Client.set` -* `memcache.Client.set_multi` -* `memcache.Client.touch` - -Collected trace data: - -* Destination (address and port) - -[float] -[[automatic-instrumentation-db-pymemcache]] -==== pymemcache - -Library: `pymemcache` (`>=3.0`) - -Instrumented methods: - -* `pymemcache.client.base.Client.add` -* `pymemcache.client.base.Client.append` -* `pymemcache.client.base.Client.cas` -* `pymemcache.client.base.Client.decr` -* `pymemcache.client.base.Client.delete` -* `pymemcache.client.base.Client.delete_many` -* `pymemcache.client.base.Client.delete_multi` -* `pymemcache.client.base.Client.flush_all` -* `pymemcache.client.base.Client.get` -* `pymemcache.client.base.Client.get_many` -* `pymemcache.client.base.Client.get_multi` -* `pymemcache.client.base.Client.gets` -* 
`pymemcache.client.base.Client.gets_many` -* `pymemcache.client.base.Client.incr` -* `pymemcache.client.base.Client.prepend` -* `pymemcache.client.base.Client.quit` -* `pymemcache.client.base.Client.replace` -* `pymemcache.client.base.Client.set` -* `pymemcache.client.base.Client.set_many` -* `pymemcache.client.base.Client.set_multi` -* `pymemcache.client.base.Client.stats` -* `pymemcache.client.base.Client.touch` - -Collected trace data: - -* Destination (address and port) - -[float] -[[automatic-instrumentation-db-kafka-python]] -==== kafka-python - -Library: `kafka-python` (`>=2.0`) - -Instrumented methods: - - * `kafka.KafkaProducer.send`, - * `kafka.KafkaConsumer.poll`, - * `kafka.KafkaConsumer.\\__next__` - -Collected trace data: - - * Destination (address and port) - * topic (if applicable) - - -[float] -[[automatic-instrumentation-http]] -=== External HTTP requests - -[float] -[[automatic-instrumentation-stdlib-urllib]] -==== Standard library - -Library: `urllib2` (Python 2) / `urllib.request` (Python 3) - -Instrumented methods: - - * `urllib2.AbstractHTTPHandler.do_open` / `urllib.request.AbstractHTTPHandler.do_open` - -Collected trace data: - - * HTTP method - * requested URL - -[float] -[[automatic-instrumentation-urllib3]] -==== urllib3 - -Library: `urllib3` - -Instrumented methods: - - * `urllib3.connectionpool.HTTPConnectionPool.urlopen` - -Additionally, we instrumented vendored instances of urllib3 in the following libraries: - - * `requests` - * `botocore` - -Both libraries have "unvendored" urllib3 in more recent versions, we recommend to use the newest versions. 
- -Collected trace data: - - * HTTP method - * requested URL - -[float] -[[automatic-instrumentation-requests]] -==== requests - -Instrumented methods: - - * `requests.sessions.Session.send` - -Collected trace data: - - * HTTP method - * requested URL - -[float] -[[automatic-instrumentation-aiohttp-client]] -==== AIOHTTP Client - -Instrumented methods: - - * `aiohttp.client.ClientSession._request` - -Collected trace data: - - * HTTP method - * requested URL - -[float] -[[automatic-instrumentation-httpx]] -==== httpx - -Instrumented methods: - - * `httpx.Client.send - -Collected trace data: - - * HTTP method - * requested URL - - -[float] -[[automatic-instrumentation-services]] -=== Services - -[float] -[[automatic-instrumentation-boto3]] -==== AWS Boto3 / Botocore - -Library: `boto3` (`>=1.0`) - -Instrumented methods: - - * `botocore.client.BaseClient._make_api_call` - -Collected trace data for all services: - - * AWS region (e.g. `eu-central-1`) - * AWS service name (e.g. `s3`) - * operation name (e.g. `ListBuckets`) - -Additionally, some services collect more specific data - -[float] -[[automatic-instrumentation-aiobotocore]] -==== AWS Aiobotocore - -Library: `aiobotocore` (`>=2.2.0`) - -Instrumented methods: - - * `aiobotocore.client.BaseClient._make_api_call` - -Collected trace data for all services: - - * AWS region (e.g. `eu-central-1`) - * AWS service name (e.g. `s3`) - * operation name (e.g. 
`ListBuckets`) - -Additionally, some services collect more specific data - -[float] -[[automatic-instrumentation-s3]] -===== S3 - - * Bucket name - -[float] -[[automatic-instrumentation-dynamodb]] -===== DynamoDB - - * Table name - - -[float] -[[automatic-instrumentation-sns]] -===== SNS - - * Topic name - -[float] -[[automatic-instrumentation-sqs]] -===== SQS - - * Queue name - -[float] -[[automatic-instrumentation-template-engines]] -=== Template Engines - -[float] -[[automatic-instrumentation-dtl]] -==== Django Template Language - -Library: `Django` (see <> for supported versions) - -Instrumented methods: - - * `django.template.Template.render` - -Collected trace data: - - * template name - -[float] -[[automatic-instrumentation-jinja2]] -==== Jinja2 - -Library: `jinja2` - -Instrumented methods: - - * `jinja2.Template.render` - -Collected trace data: - - * template name diff --git a/docs/tornado.asciidoc b/docs/tornado.asciidoc deleted file mode 100644 index c7281761f..000000000 --- a/docs/tornado.asciidoc +++ /dev/null @@ -1,125 +0,0 @@ -[[tornado-support]] -=== Tornado Support - -Incorporating Elastic APM into your Tornado project only requires a few easy -steps. - -[float] -[[tornado-installation]] -==== Installation - -Install the Elastic APM agent using pip: - -[source,bash] ----- -$ pip install elastic-apm ----- - -or add `elastic-apm` to your project's `requirements.txt` file. - - -[float] -[[tornado-setup]] -==== Setup - -To set up the agent, you need to initialize it with appropriate settings. - -The settings are configured either via environment variables, -the application's settings, or as initialization arguments. - -You can find a list of all available settings in the -<> page. 
- -To initialize the agent for your application using environment variables: - -[source,python] ----- -import tornado.web -from elasticapm.contrib.tornado import ElasticAPM - -app = tornado.web.Application() -apm = ElasticAPM(app) ----- - -To configure the agent using `ELASTIC_APM` in your application's settings: - -[source,python] ----- -import tornado.web -from elasticapm.contrib.tornado import ElasticAPM - -app = tornado.web.Application() -app.settings['ELASTIC_APM'] = { - 'SERVICE_NAME': '', - 'SECRET_TOKEN': '', -} -apm = ElasticAPM(app) ----- - -[float] -[[tornado-usage]] -==== Usage - -Once you have configured the agent, it will automatically track transactions -and capture uncaught exceptions within tornado. - -Capture an arbitrary exception by calling -<>: - -[source,python] ----- -try: - 1 / 0 -except ZeroDivisionError: - apm.client.capture_exception() ----- - -Log a generic message with <>: - -[source,python] ----- -apm.client.capture_message('hello, world!') ----- - -[float] -[[tornado-performance-metrics]] -==== Performance metrics - -If you've followed the instructions above, the agent has installed our -instrumentation within the base RequestHandler class in tornado.web. This will -measure response times, as well as detailed performance data for all supported -technologies. - -NOTE: Due to the fact that `asyncio` drivers are usually separate from their -synchronous counterparts, specific instrumentation is needed for all drivers. -The support for asynchronous drivers is currently quite limited. - -[float] -[[tornado-ignoring-specific-views]] -===== Ignoring specific routes - -You can use the -<> -configuration option to ignore specific routes. The list given should be a -list of regular expressions which are matched against the transaction name: - -[source,python] ----- -app.settings['ELASTIC_APM'] = { - # ... - 'TRANSACTIONS_IGNORE_PATTERNS': ['^GET SecretHandler', 'MainHandler'] - # ... 
-} ----- - -This would ignore any requests using the `GET SecretHandler` route -and any requests containing `MainHandler`. - - -[float] -[[supported-tornado-and-python-versions]] -==== Supported tornado and Python versions - -A list of supported <> and <> versions can be found on our <> page. - -NOTE: Elastic APM only supports `asyncio` when using Python 3.7+ diff --git a/docs/troubleshooting.asciidoc b/docs/troubleshooting.asciidoc deleted file mode 100644 index 40b8ed8fe..000000000 --- a/docs/troubleshooting.asciidoc +++ /dev/null @@ -1,172 +0,0 @@ -[[troubleshooting]] -== Troubleshooting - -Below are some resources and tips for troubleshooting and debugging the -python agent. - -* <> -* <> -* <> -* <> - -[float] -[[easy-fixes]] -=== Easy Fixes - -Before you try anything else, go through the following sections to ensure that -the agent is configured correctly. This is not an exhaustive list, but rather -a list of common problems that users run into. - -[float] -[[debug-mode]] -==== Debug Mode - -Most frameworks support a debug mode. Generally, this mode is intended for -non-production environments and provides detailed error messages and logging of -potentially sensitive data. Because of these security issues, the agent will -not collect traces if the app is in debug mode by default. - -You can override this behavior with the <> configuration. - -Note that configuration of the agent should occur before creation of any -`ElasticAPM` objects: - -[source,python] ----- -app = Flask(__name__) -app.config["ELASTIC_APM"] = {"DEBUG": True} -apm = ElasticAPM(app, service_name="flask-app") ----- - -[float] -[[psutil-metrics]] -==== `psutil` for Metrics - -To get CPU and system metrics on non-Linux systems, `psutil` must be -installed. The agent should automatically show a warning on start if it is -not installed, but sometimes this warning can be suppressed. Install `psutil` -and metrics should be collected by the agent and sent to the APM Server. 
- -[source,bash] ----- -python3 -m pip install psutil ----- - -[float] -[[apm-server-credentials]] -==== Credential issues - -In order for the agent to send data to the APM Server, it may need an -<> or a <>. Double -check your APM Server settings and make sure that your credentials are -configured correctly. Additionally, check that <> -is correct. - -[float] -[[django-test]] -=== Django `check` and `test` - -When used with Django, the agent provides two management commands to help debug -common issues. Head over to the <> -for more information. - -[float] -[[agent-logging]] -=== Agent logging - -To get the agent to log more data, all that is needed is a -https://docs.python.org/3/library/logging.html#handler-objects[Handler] which -is attached either to the `elasticapm` logger or to the root logger. - -Note that if you attach the handler to the root logger, you also need to -explicitly set the log level of the `elasticapm` logger: - -[source,python] ----- -import logging -apm_logger = logging.getLogger("elasticapm") -apm_logger.setLevel(logging.DEBUG) ----- - -[float] -[[django-agent-logging]] -==== Django - -The simplest way to log more data from the agent is to add a console logging -Handler to the `elasticapm` logger. Here's a (very simplified) example: - -[source,python] ----- -LOGGING = { - 'handlers': { - 'console': { - 'level': 'DEBUG', - 'class': 'logging.StreamHandler' - } - }, - 'loggers': { - 'elasticapm': { - 'level': 'DEBUG', - 'handlers': ['console'] - }, - }, -} ----- - -[float] -[[flask-agent-logging]] -==== Flask - -Flask https://flask.palletsprojects.com/en/1.1.x/logging/[recommends using `dictConfig()`] -to set up logging. If you're using this format, adding logging for the agent -will be very similar to the <>. - -Otherwise, you can use the <>. 
- -[float] -[[generic-agent-logging]] -==== Generic instructions - -Creating a console Handler and adding it to the `elasticapm` logger is easy: - -[source,python] ----- -import logging - -elastic_apm_logger = logging.getLogger("elasticapm") -console_handler = logging.StreamHandler() -console_handler.setLevel(logging.DEBUG) -elastic_apm_logger.addHandler(console_handler) ----- - -You can also just add the console Handler to the root logger. This will apply -that handler to all log messages from all modules. - -[source,python] ----- -import logging - -logger = logging.getLogger() -console_handler = logging.StreamHandler() -console_handler.setLevel(logging.DEBUG) -logger.addHandler(console_handler) ----- - -See the https://docs.python.org/3/library/logging.html[python logging docs] -for more details about Handlers (and information on how to format your logs -using Formatters). - -[float] -[[disable-agent]] -=== Disable the Agent - -In the unlikely event the agent causes disruptions to a production application, -you can disable the agent while you troubleshoot. - -If you have access to <>, -you can disable the recording of events by setting <> to `false`. -When changed at runtime from a supported source, there's no need to restart your application. - -If that doesn't work, or you don't have access to dynamic configuration, you can disable the agent by setting -<> to `false`. -You'll need to restart your application for the changes to take effect. diff --git a/docs/tuning.asciidoc b/docs/tuning.asciidoc deleted file mode 100644 index 6030f29cc..000000000 --- a/docs/tuning.asciidoc +++ /dev/null @@ -1,115 +0,0 @@ -[[tuning-and-overhead]] -== Performance tuning - -Using an APM solution comes with certain trade-offs, and the Python agent for Elastic APM is no different. 
-Instrumenting your code, measuring timings, recording context data, etc., all need resources: - - * CPU time - * memory - * bandwidth use - * Elasticsearch storage - -We invested and continue to invest a lot of effort to keep the overhead of using Elastic APM as low as possible. -But because every deployment is different, there are some knobs you can turn to adapt it to your specific needs. - -[float] -[[tuning-sample-rate]] -=== Transaction Sample Rate - -The easiest way to reduce the overhead of the agent is to tell the agent to do less. -If you set the <> to a value below `1.0`, -the agent will randomly sample only a subset of transactions. -Unsampled transactions only record the name of the transaction, the overall transaction time, and the result: - -[options="header"] -|============ -| Field | Sampled | Unsampled -| Transaction name | yes | yes -| Duration | yes | yes -| Result | yes | yes -| Context | yes | no -| Tags | yes | no -| Spans | yes | no -|============ - -Reducing the sample rate to a fraction of all transactions can make a huge difference in all four of the mentioned resource types. - -[float] -[[tuning-queue]] -=== Transaction Queue - -To reduce the load on the APM Server, the agent does not send every transaction up as it happens. -Instead, it queues them up and flushes the queue periodically, or when it reaches a maximum size, using a background thread. - -While this reduces the load on the APM Server (and to a certain extent on the agent), -holding on to the transaction data in a queue uses memory. -If you notice that using the Python agent results in a large increase of memory use, -you can use these settings: - - * <> to reduce the time between queue flushes - * <> to reduce the maximum size of the queue - -The first setting, `api_request_time`, is helpful if you have a sustained high number of transactions. 
-The second setting, `api_request_size`, can help if you experience peaks of transactions -(a large number of transactions in a short period of time). - -Keep in mind that reducing the value of either setting will cause the agent to send more HTTP requests to the APM Server, -potentially causing a higher load. - -[float] -[[tuning-max-spans]] -=== Spans per transaction - -The average amount of spans per transaction can influence how much time the agent spends in each transaction collecting contextual data for each span, -and the storage space needed in Elasticsearch. -In our experience, most _usual_ transactions should have well below 100 spans. -In some cases, however, the number of spans can explode: - - * long-running transactions - * unoptimized code, e.g. doing hundreds of SQL queries in a loop - -To avoid these edge cases overloading both the agent and the APM Server, -the agent stops recording spans when a specified limit is reached. -You can configure this limit by changing the <> setting. - -[float] -[[tuning-span-stack-trace-collection]] -=== Span Stack Trace Collection - -Collecting stack traces for spans can be fairly costly from a performance standpoint. -Stack traces are very useful for pinpointing which part of your code is generating a span; -however, these stack traces are less useful for very short spans (as problematic spans tend to be longer). - -You can define a minimal threshold for span duration -using the <> setting. -If a span's duration is less than this config value, no stack frames will be collected for this span. - -[float] -[[tuning-frame-context]] -=== Collecting Frame Context - -When a stack trace is captured, the agent will also capture several lines of source code around each frame location in the stack trace. This allows the APM app to give greater insight into where exactly the error or span happens. 
- -There are four settings you can modify to control this behavior: - -* <> -* <> -* <> -* <> - -As you can see, these settings are divided between app frames, which represent your application code, and library frames, which represent the code of your dependencies. Each of these categories are also split into separate error and span settings. - -Reading source files inside a running application can cause a lot of disk I/O, and sending up source lines for each frame will have a network and storage cost that is quite high. Turning down these limits will help prevent excessive memory usage. - -[float] -[[tuning-body-headers]] -=== Collecting headers and request body - -You can configure the Elastic APM agent to capture headers of both requests and responses (<>), -as well as request bodies (<>). -By default, capturing request bodies is disabled. -Enabling it for transactions may introduce noticeable overhead, as well as increased storage use, depending on the nature of your POST requests. -In most scenarios, we advise against enabling request body capturing for transactions, and only enable it if necessary for errors. - -Capturing request/response headers has less overhead on the agent, but can have an impact on storage use. -If storage use is a problem for you, it might be worth disabling. diff --git a/docs/upgrading.asciidoc b/docs/upgrading.asciidoc deleted file mode 100644 index 509116a06..000000000 --- a/docs/upgrading.asciidoc +++ /dev/null @@ -1,81 +0,0 @@ -[[upgrading]] -== Upgrading - -Upgrades between minor versions of the agent, like from 3.1 to 3.2 are always backwards compatible. -Upgrades that involve a major version bump often come with some backwards incompatible changes. - -We highly recommend to always pin the version of `elastic-apm` in your `requirements.txt` or `Pipfile`. -This avoids automatic upgrades to potentially incompatible versions. 
- -[float] -[[end-of-life-dates]] -=== End of life dates - -We love all our products, but sometimes we must say goodbye to a release so that we can continue moving -forward on future development and innovation. -Our https://www.elastic.co/support/eol[End of life policy] defines how long a given release is considered supported, -as well as how long a release is considered still in active development or maintenance. - -[[upgrading-6.x]] -=== Upgrading to version 6 of the agent - -==== Python 2 no longer supported - -Please upgrade to Python 3.6+ to continue to receive regular updates. - -==== `SANITIZE_FIELD_NAMES` changes - -If you are using a non-default `sanitize_field_names` config, please note -that your entries must be surrounded with stars (e.g. `*secret*`) in order to -maintain previous behavior. - -==== Tags removed (in favor of labels) - -Tags were deprecated in the 5.x release (in favor of labels). They have now been -removed. - -[[upgrading-5.x]] -=== Upgrading to version 5 of the agent - -==== APM Server 7.3 required for some features - -APM Server and Kibana 7.3 introduced support for collecting breakdown metrics, and central configuration of APM agents. -To use these features, please update the Python agent to 5.0+ and APM Server / Kibana to 7.3+ - -==== Tags renamed to Labels - -To better align with other parts of the Elastic Stack and the {ecs-ref}/index.html[Elastic Common Schema], -we renamed "tags" to "labels", and introduced limited support for typed labels. -While tag values were only allowed to be strings, label values can be strings, booleans, or numerical. - -To benefit from this change, ensure that you run at least *APM Server 6.7*, and use `elasticapm.label()` instead of `elasticapm.tag()`. -The `tag()` API will continue to work as before, but emit a `DeprecationWarning`. It will be removed in 6.0 of the agent. 
- -[[upgrading-4.x]] -=== Upgrading to version 4 of the agent - -4.0 of the Elastic APM Python Agent comes with several backwards incompatible changes. - -[[upgrading-4.x-apm-server]] -==== APM Server 6.5 required -This version of the agent is *only compatible with APM Server 6.5+*. -To upgrade, we recommend to first upgrade APM Server, and then the agent. -APM Server 6.5+ is backwards compatible with versions 2.x and 3.x of the agent. - -[[upgrading-4.x-configuration]] -==== Configuration options - -Several configuration options have been removed, or renamed - - * `flush_interval` has been removed - * the `flush_interval` and `max_queue_size` settings have been removed. - * new settings introduced: `api_request_time` and `api_request_size`. - * Some settings now require a unit for duration or size. See <> and <>. - -[[upgrading-4.x-processors]] -==== Processors - -The method to write processors for sanitizing events has been changed. -It will now be called for every type of event (transactions, spans and errors), -unless the event types are limited using a decorator. -See <> for more information. diff --git a/docs/wrapper.asciidoc b/docs/wrapper.asciidoc deleted file mode 100644 index 4658201c6..000000000 --- a/docs/wrapper.asciidoc +++ /dev/null @@ -1,58 +0,0 @@ -[[wrapper-support]] -=== Wrapper Support - -experimental::[] - -The following frameworks are supported using our new wrapper script for -no-code-changes instrumentation: - - * Django - * Flask - * Starlette - -Please keep in mind that these instrumentations are a work in progress! We'd -love to have feedback on our -https://github.com/elastic/apm-agent-python/issues/new/choose[issue tracker]. - -[[wrapper-usage]] -==== Usage - -When installing the agent, an entrypoint script, `elasticapm-run` is installed -as well. You can use this script to instrument your app (assuming it's using a -supported framework) without changing your code! 
- -[source,bash] ----- -$ elasticapm-run --version -elasticapm-run 6.14.0 ----- - -Alternatively, you can run the entrypoint directly: - -[source,bash] ----- -$ python -m elasticapm.instrumentation.wrapper --version -elasticapm-run 6.14.0 ----- - -The `elasticapm-run` script can be used to run any Python script or module: - -[source,bash] ----- -$ elasticapm-run flask run -$ elasticapm-run python myapp.py ----- - -Generally, config should be passed in via environment variables. For example, - -[source,bash] ----- -$ ELASTIC_APM_SERVICE_NAME=my_flask_app elasticapm-run flask run ----- - -You can also pass config options as arguments to the script: - -[source,bash] ----- -$ elasticapm-run --config "service_name=my_flask_app" --config "debug=true" flask run ----- diff --git a/elasticapm/__init__.py b/elasticapm/__init__.py index 4c19b0a17..b6c4499c4 100644 --- a/elasticapm/__init__.py +++ b/elasticapm/__init__.py @@ -49,7 +49,6 @@ ) from elasticapm.utils.disttracing import trace_parent_from_headers, trace_parent_from_string # noqa: F401 -__all__ = ("VERSION", "Client") _activation_method = None @@ -66,3 +65,28 @@ raise DeprecationWarning("The Elastic APM agent requires Python 3.6+") from elasticapm.contrib.asyncio.traces import async_capture_span # noqa: F401 E402 + +__all__ = ( + "Client", + "VERSION", + "async_capture_span", + "capture_serverless", + "capture_span", + "get_client", + "get_span_id", + "get_trace_id", + "get_trace_parent_header", + "get_transaction_id", + "instrument", + "label", + "set_context", + "set_custom_context", + "set_transaction_name", + "set_transaction_outcome", + "set_transaction_result", + "set_user_context", + "setup_logging", + "trace_parent_from_headers", + "trace_parent_from_string", + "uninstrument", +) diff --git a/elasticapm/base.py b/elasticapm/base.py index 5f50dc79d..2c82f0d88 100644 --- a/elasticapm/base.py +++ b/elasticapm/base.py @@ -155,7 +155,8 @@ def __init__(self, config=None, **inline) -> None: "processors": 
self.load_processors(), } if config.transport_json_serializer: - transport_kwargs["json_serializer"] = config.transport_json_serializer + json_serializer_func = import_string(config.transport_json_serializer) + transport_kwargs["json_serializer"] = json_serializer_func self._api_endpoint_url = urllib.parse.urljoin( self.config.server_url if self.config.server_url.endswith("/") else self.config.server_url + "/", @@ -373,7 +374,7 @@ def get_service_info(self): def get_process_info(self): result = { "pid": os.getpid(), - "ppid": os.getppid() if hasattr(os, "getppid") else None, + "ppid": os.getppid(), "title": None, # Note: if we implement this, the value needs to be wrapped with keyword_field } if self.config.include_process_args: diff --git a/elasticapm/conf/__init__.py b/elasticapm/conf/__init__.py index 5318d64b5..f00a2cda9 100644 --- a/elasticapm/conf/__init__.py +++ b/elasticapm/conf/__init__.py @@ -37,6 +37,8 @@ import threading from datetime import timedelta +import _hashlib + from elasticapm.conf.constants import BASE_SANITIZE_FIELD_NAMES, TRACE_CONTINUATION_STRATEGY from elasticapm.utils import compat, starmatch_to_regex from elasticapm.utils.logging import get_logger @@ -220,6 +222,8 @@ class _BoolConfigValue(_ConfigValue): def __init__(self, dict_key, true_string="true", false_string="false", **kwargs) -> None: self.true_string = true_string self.false_string = false_string + # this is necessary to have the bool type preserved in _validate + kwargs["type"] = bool super(_BoolConfigValue, self).__init__(dict_key, **kwargs) def __set__(self, instance, value) -> None: @@ -228,6 +232,7 @@ def __set__(self, instance, value) -> None: value = True elif value.lower() == self.false_string: value = False + value = self._validate(instance, value) self._callback_if_changed(instance, value) instance._values[self.dict_key] = bool(value) @@ -275,7 +280,14 @@ def __call__(self, value, field_name): match = re.match(self.regex, value) if match: return value - raise 
ConfigurationError("{} does not match pattern {}".format(value, self.verbose_pattern), field_name) + raise ConfigurationError( + "{}={} does not match pattern {}".format( + field_name, + value, + self.verbose_pattern, + ), + field_name, + ) class UnitValidator(object): @@ -288,12 +300,19 @@ def __call__(self, value, field_name): value = str(value) match = re.match(self.regex, value, re.IGNORECASE) if not match: - raise ConfigurationError("{} does not match pattern {}".format(value, self.verbose_pattern), field_name) + raise ConfigurationError( + "{}={} does not match pattern {}".format( + field_name, + value, + self.verbose_pattern, + ), + field_name, + ) val, unit = match.groups() try: val = int(val) * self.unit_multipliers[unit] except KeyError: - raise ConfigurationError("{} is not a supported unit".format(unit), field_name) + raise ConfigurationError("{}={} is not a supported unit".format(field_name, unit), field_name) return val @@ -315,7 +334,7 @@ def __call__(self, value, field_name): try: value = float(value) except ValueError: - raise ConfigurationError("{} is not a float".format(value), field_name) + raise ConfigurationError("{}={} is not a float".format(field_name, value), field_name) multiplier = 10**self.precision rounded = math.floor(value * multiplier + 0.5) / multiplier if rounded == 0 and self.minimum and value != 0: @@ -337,8 +356,10 @@ def __init__(self, range_start, range_end, range_desc) -> None: def __call__(self, value, field_name): if self.range_start <= value <= self.range_end: raise ConfigurationError( - "{} cannot be in range: {}".format( - value, self.range_desc.format(**{"range_start": self.range_start, "range_end": self.range_end}) + "{}={} cannot be in range: {}".format( + field_name, + value, + self.range_desc.format(**{"range_start": self.range_start, "range_end": self.range_end}), ), field_name, ) @@ -349,11 +370,35 @@ class FileIsReadableValidator(object): def __call__(self, value, field_name): value = os.path.normpath(value) if 
not os.path.exists(value): - raise ConfigurationError("{} does not exist".format(value), field_name) + raise ConfigurationError("{}={} does not exist".format(field_name, value), field_name) elif not os.path.isfile(value): - raise ConfigurationError("{} is not a file".format(value), field_name) + raise ConfigurationError("{}={} is not a file".format(field_name, value), field_name) elif not os.access(value, os.R_OK): - raise ConfigurationError("{} is not readable".format(value), field_name) + raise ConfigurationError("{}={} is not readable".format(field_name, value), field_name) + return value + + +def _in_fips_mode(): + try: + return _hashlib.get_fips_mode() == 1 + except AttributeError: + # versions older of Python3.9 do not have the helper + return False + + +class SupportedValueInFipsModeValidator(object): + """If FIPS mode is enabled only supported_value is accepted""" + + def __init__(self, supported_value) -> None: + self.supported_value = supported_value + + def __call__(self, value, field_name): + if _in_fips_mode(): + if value != self.supported_value: + raise ConfigurationError( + "{}={} must be set to {} if FIPS mode is enabled".format(field_name, value, self.supported_value), + field_name, + ) return value @@ -384,7 +429,12 @@ def __call__(self, value, field_name): ret = self.valid_values.get(value.lower()) if ret is None: raise ConfigurationError( - "{} is not in the list of valid values: {}".format(value, list(self.valid_values.values())), field_name + "{}={} is not in the list of valid values: {}".format( + field_name, + value, + list(self.valid_values.values()), + ), + field_name, ) return ret @@ -558,7 +608,9 @@ class Config(_ConfigBase): server_url = _ConfigValue("SERVER_URL", default="http://127.0.0.1:8200", required=True) server_cert = _ConfigValue("SERVER_CERT", validators=[FileIsReadableValidator()]) server_ca_cert_file = _ConfigValue("SERVER_CA_CERT_FILE", validators=[FileIsReadableValidator()]) - verify_server_cert = 
_BoolConfigValue("VERIFY_SERVER_CERT", default=True) + verify_server_cert = _BoolConfigValue( + "VERIFY_SERVER_CERT", default=True, validators=[SupportedValueInFipsModeValidator(supported_value=True)] + ) use_certifi = _BoolConfigValue("USE_CERTIFI", default=True) include_paths = _ListConfigValue("INCLUDE_PATHS") exclude_paths = _ListConfigValue("EXCLUDE_PATHS", default=compat.get_default_library_patters()) @@ -691,6 +743,7 @@ class Config(_ConfigBase): default=TRACE_CONTINUATION_STRATEGY.CONTINUE, ) include_process_args = _BoolConfigValue("INCLUDE_PROCESS_ARGS", default=False) + skip_server_info = _BoolConfigValue("SKIP_SERVER_INFO", default=False) @property def is_recording(self): diff --git a/elasticapm/contrib/asgi.py b/elasticapm/contrib/asgi.py index 701ea3d0e..cb66e25eb 100644 --- a/elasticapm/contrib/asgi.py +++ b/elasticapm/contrib/asgi.py @@ -50,6 +50,7 @@ async def wrapped_send(message) -> None: await set_context(lambda: middleware.get_data_from_response(message, constants.TRANSACTION), "response") result = "HTTP {}xx".format(message["status"] // 100) elasticapm.set_transaction_result(result, override=False) + elasticapm.set_transaction_outcome(http_status_code=message["status"], override=False) await send(message) return wrapped_send @@ -76,9 +77,8 @@ async def __call__(self, scope: "Scope", receive: "ASGIReceiveCallable", send: " url, url_dict = self.get_url(scope) body = None if not self.client.should_ignore_url(url): - self.client.begin_transaction( - transaction_type="request", trace_parent=TraceParent.from_headers(scope["headers"]) - ) + headers = self.get_headers(scope) + self.client.begin_transaction(transaction_type="request", trace_parent=TraceParent.from_headers(headers)) self.set_transaction_name(scope["method"], url) if scope["method"] in constants.HTTP_WITH_BODY and self.client.config.capture_body != "off": messages = [] @@ -92,13 +92,15 @@ async def __call__(self, scope: "Scope", receive: "ASGIReceiveCallable", send: " body = str(body_raw, 
errors="ignore") # Dispatch to the ASGI callable - async def wrapped_receive(): + async def new_wrapped_receive(): if messages: return messages.pop(0) # Once that's done we can just await any other messages. return await receive() + wrapped_receive = new_wrapped_receive + await set_context(lambda: self.get_data_from_request(scope, constants.TRANSACTION, body), "request") try: diff --git a/elasticapm/contrib/django/handlers.py b/elasticapm/contrib/django/handlers.py index 0c97e888d..c980acc4f 100644 --- a/elasticapm/contrib/django/handlers.py +++ b/elasticapm/contrib/django/handlers.py @@ -47,11 +47,11 @@ class LoggingHandler(BaseLoggingHandler): def __init__(self, level=logging.NOTSET) -> None: warnings.warn( - "The LoggingHandler will be deprecated in v7.0 of the agent. " + "The LoggingHandler is deprecated and will be removed in v7.0 of the agent. " "Please use `log_ecs_reformatting` and ship the logs with Elastic " "Agent or Filebeat instead. " "https://www.elastic.co/guide/en/apm/agent/python/current/logs.html", - PendingDeprecationWarning, + DeprecationWarning, ) # skip initialization of BaseLoggingHandler logging.Handler.__init__(self, level=level) diff --git a/elasticapm/contrib/django/middleware/__init__.py b/elasticapm/contrib/django/middleware/__init__.py index c4637d087..612bb5c96 100644 --- a/elasticapm/contrib/django/middleware/__init__.py +++ b/elasticapm/contrib/django/middleware/__init__.py @@ -110,8 +110,9 @@ def process_request_wrapper(wrapped, instance, args, kwargs): elasticapm.set_transaction_name( build_name_with_http_method_prefix(get_name_from_middleware(wrapped, instance), request) ) - finally: - return response + except Exception: + pass + return response def process_response_wrapper(wrapped, instance, args, kwargs): @@ -125,8 +126,9 @@ def process_response_wrapper(wrapped, instance, args, kwargs): elasticapm.set_transaction_name( build_name_with_http_method_prefix(get_name_from_middleware(wrapped, instance), request) ) - finally: - return 
response + except Exception: + pass + return response class TracingMiddleware(MiddlewareMixin, ElasticAPMClientMiddlewareMixin): diff --git a/elasticapm/contrib/flask/__init__.py b/elasticapm/contrib/flask/__init__.py index e9ec3323e..fdb6906dd 100644 --- a/elasticapm/contrib/flask/__init__.py +++ b/elasticapm/contrib/flask/__init__.py @@ -87,7 +87,7 @@ def __init__(self, app=None, client=None, client_cls=Client, logging=False, **de if self.logging: warnings.warn( "Flask log shipping is deprecated. See the Flask docs for more info and alternatives.", - PendingDeprecationWarning, + DeprecationWarning, ) self.client = client or get_client() self.client_cls = client_cls diff --git a/elasticapm/contrib/grpc/async_server_interceptor.py b/elasticapm/contrib/grpc/async_server_interceptor.py index 5af0c1372..e7c9b659f 100644 --- a/elasticapm/contrib/grpc/async_server_interceptor.py +++ b/elasticapm/contrib/grpc/async_server_interceptor.py @@ -33,20 +33,18 @@ import grpc import elasticapm -from elasticapm.contrib.grpc.server_interceptor import _ServicerContextWrapper, _wrap_rpc_behavior, get_trace_parent +from elasticapm.contrib.grpc.server_interceptor import _ServicerContextWrapper, get_trace_parent class _AsyncServerInterceptor(grpc.aio.ServerInterceptor): async def intercept_service(self, continuation, handler_call_details): - def transaction_wrapper(behavior, request_streaming, response_streaming): - async def _interceptor(request_or_iterator, context): - if request_streaming or response_streaming: # only unary-unary is supported - return behavior(request_or_iterator, context) + def wrap_unary_unary(behavior): + async def _interceptor(request, context): tp = get_trace_parent(handler_call_details) client = elasticapm.get_client() transaction = client.begin_transaction("request", trace_parent=tp) try: - result = behavior(request_or_iterator, _ServicerContextWrapper(context, transaction)) + result = behavior(request, _ServicerContextWrapper(context, transaction)) # This is 
so we can support both sync and async rpc functions if inspect.isawaitable(result): @@ -65,4 +63,12 @@ async def _interceptor(request_or_iterator, context): return _interceptor - return _wrap_rpc_behavior(await continuation(handler_call_details), transaction_wrapper) + handler = await continuation(handler_call_details) + if handler.request_streaming or handler.response_streaming: + return handler + + return grpc.unary_unary_rpc_method_handler( + wrap_unary_unary(handler.unary_unary), + request_deserializer=handler.request_deserializer, + response_serializer=handler.response_serializer, + ) diff --git a/elasticapm/contrib/opentelemetry/span.py b/elasticapm/contrib/opentelemetry/span.py index a7d049faa..7917070d5 100644 --- a/elasticapm/contrib/opentelemetry/span.py +++ b/elasticapm/contrib/opentelemetry/span.py @@ -32,7 +32,7 @@ import types as python_types import typing import urllib.parse -from typing import Optional +from typing import Optional, Union from opentelemetry.context import Context from opentelemetry.sdk import trace as oteltrace @@ -157,13 +157,19 @@ def is_recording(self) -> bool: """ return self.elastic_span.transaction.is_sampled and not self.elastic_span.ended_time - def set_status(self, status: Status) -> None: + def set_status(self, status: Union[Status, StatusCode], description: Optional[str] = None) -> None: """Sets the Status of the Span. If used, this will override the default Span status. 
""" - if status.status_code == StatusCode.ERROR: + # Handle both Status objects and StatusCode enums + if isinstance(status, Status): + status_code = status.status_code + else: + status_code = status + + if status_code == StatusCode.ERROR: self.elastic_span.outcome = constants.OUTCOME.FAILURE - elif status.status_code == StatusCode.OK: + elif status_code == StatusCode.OK: self.elastic_span.outcome = constants.OUTCOME.SUCCESS else: self.elastic_span.outcome = constants.OUTCOME.UNKNOWN diff --git a/elasticapm/contrib/opentracing/__init__.py b/elasticapm/contrib/opentracing/__init__.py index 8fbc99b19..71619ea20 100644 --- a/elasticapm/contrib/opentracing/__init__.py +++ b/elasticapm/contrib/opentracing/__init__.py @@ -36,8 +36,8 @@ warnings.warn( ( - "The OpenTracing bridge will be deprecated in the next major release. " + "The OpenTracing bridge is deprecated and will be removed in the next major release. " "Please migrate to the OpenTelemetry bridge." ), - PendingDeprecationWarning, + DeprecationWarning, ) diff --git a/elasticapm/contrib/sanic/utils.py b/elasticapm/contrib/sanic/utils.py index e4e987274..9744bf89b 100644 --- a/elasticapm/contrib/sanic/utils.py +++ b/elasticapm/contrib/sanic/utils.py @@ -33,7 +33,7 @@ from sanic import Sanic from sanic import __version__ as version -from sanic.cookies import CookieJar +from sanic.cookies import Cookie, CookieJar from sanic.request import Request from sanic.response import HTTPResponse @@ -120,7 +120,14 @@ async def get_response_info(config: Config, response: HTTPResponse, event_type: result["status_code"] = response.status if config.capture_headers: - result["headers"] = dict(response.headers) + + def normalize(v): + # we are getting entries for Set-Cookie headers as Cookie instances + if isinstance(v, Cookie): + return str(v) + return v + + result["headers"] = {k: normalize(v) for k, v in response.headers.items()} if config.capture_body in ("all", event_type) and "octet-stream" not in response.content_type: 
result["body"] = response.body.decode("utf-8") @@ -148,4 +155,12 @@ def make_client(client_cls=Client, **defaults) -> Client: def _transform_response_cookie(cookies: CookieJar) -> Dict[str, str]: """Transform the Sanic's CookieJar instance into a Normal dictionary to build the context""" - return {k: {"value": v.value, "path": v["path"]} for k, v in cookies.items()} + # old sanic versions used to have an items() method + if hasattr(cookies, "items"): + return {k: {"value": v.value, "path": v["path"]} for k, v in cookies.items()} + + try: + return {cookie.key: {"value": cookie.value, "path": cookie.path} for cookie in cookies.cookies} + except KeyError: + # cookies.cookies assumes Set-Cookie header will be there + return {} diff --git a/elasticapm/contrib/serverless/aws.py b/elasticapm/contrib/serverless/aws.py index 26f37bdfb..e2af1a735 100644 --- a/elasticapm/contrib/serverless/aws.py +++ b/elasticapm/contrib/serverless/aws.py @@ -71,6 +71,9 @@ def capture_serverless(func: Optional[callable] = None, **kwargs) -> callable: @capture_serverless def handler(event, context): return {"statusCode": r.status_code, "body": "Success!"} + + Please note that when using the APM Layer and setting AWS_LAMBDA_EXEC_WRAPPER this is not required and + the handler would be instrumented automatically. 
""" if not func: # This allows for `@capture_serverless()` in addition to @@ -135,6 +138,18 @@ def prep_kwargs(kwargs=None): return kwargs +def should_normalize_headers(event: dict) -> bool: + """ + Helper to decide if we should normalize headers or not depending on the event + + Even if the documentation says that headers are lowercased it's not always the case for format version 1.0 + https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-develop-integrations-lambda.html + """ + + request_context = event.get("requestContext", {}) + return ("elb" in request_context or "requestId" in request_context) and "http" not in request_context + + class _lambda_transaction(object): """ Context manager for creating transactions around AWS Lambda functions. @@ -162,7 +177,13 @@ def __enter__(self): # service like Step Functions, and is unlikely to be standardized # in any way. We just have to rely on our defaults in this case. self.event = {} - trace_parent = TraceParent.from_headers(self.event.get("headers") or {}) + + headers = self.event.get("headers") or {} + if headers and should_normalize_headers(self.event): + normalized_headers = {k.lower(): v for k, v in headers.items()} + else: + normalized_headers = headers + trace_parent = TraceParent.from_headers(normalized_headers) global COLD_START cold_start = COLD_START @@ -214,11 +235,20 @@ def __enter__(self): transaction_name = "RECEIVE {}".format(record["eventSourceARN"].split(":")[5]) if "Records" in self.event: + # SQS links = [ TraceParent.from_string(record["messageAttributes"]["traceparent"]["stringValue"]) for record in self.event["Records"][:1000] if "messageAttributes" in record and "traceparent" in record["messageAttributes"] ] + # SNS + links += [ + TraceParent.from_string(record["Sns"]["MessageAttributes"]["traceparent"]["Value"]) + for record in self.event["Records"][:1000] + if "Sns" in record + and "MessageAttributes" in record["Sns"] + and "traceparent" in record["Sns"]["MessageAttributes"] + 
] else: links = [] diff --git a/elasticapm/contrib/serverless/azure.py b/elasticapm/contrib/serverless/azure.py index ed2444d60..33d406934 100644 --- a/elasticapm/contrib/serverless/azure.py +++ b/elasticapm/contrib/serverless/azure.py @@ -43,8 +43,6 @@ from elasticapm.utils.disttracing import TraceParent from elasticapm.utils.logging import get_logger -SERVERLESS_HTTP_REQUEST = ("api", "elb") - logger = get_logger("elasticapm.serverless") _AnnotatedFunctionT = TypeVar("_AnnotatedFunctionT") @@ -98,7 +96,7 @@ def configure(cls, client_class=AzureFunctionsClient, **kwargs) -> None: if not client: kwargs["metrics_interval"] = "0ms" kwargs["breakdown_metrics"] = "false" - if "metric_sets" not in kwargs and "ELASTIC_APM_METRICS_SETS" not in os.environ: + if "metrics_sets" not in kwargs and "ELASTIC_APM_METRICS_SETS" not in os.environ: # Allow users to override metrics sets kwargs["metrics_sets"] = [] kwargs["central_config"] = "false" @@ -116,7 +114,7 @@ def configure(cls, client_class=AzureFunctionsClient, **kwargs) -> None: and "AZURE_FUNCTIONS_ENVIRONMENT" in os.environ ): kwargs["environment"] = os.environ["AZURE_FUNCTIONS_ENVIRONMENT"] - client = AzureFunctionsClient(**kwargs) + client = client_class(**kwargs) cls.client = client @classmethod diff --git a/elasticapm/contrib/starlette/__init__.py b/elasticapm/contrib/starlette/__init__.py index a6262ba86..3dfb225c9 100644 --- a/elasticapm/contrib/starlette/__init__.py +++ b/elasticapm/contrib/starlette/__init__.py @@ -36,6 +36,7 @@ from typing import Dict, Optional import starlette +from starlette.datastructures import Headers from starlette.requests import Request from starlette.routing import Match, Mount from starlette.types import ASGIApp, Message @@ -105,7 +106,7 @@ class ElasticAPM: >>> elasticapm.capture_message('hello, world!') """ - def __init__(self, app: ASGIApp, client: Optional[Client], **kwargs) -> None: + def __init__(self, app: ASGIApp, client: Optional[Client] = None, **kwargs) -> None: """ Args: 
@@ -146,11 +147,16 @@ async def wrapped_send(message) -> None: ) result = "HTTP {}xx".format(message["status"] // 100) elasticapm.set_transaction_result(result, override=False) + elasticapm.set_transaction_outcome(http_status_code=message["status"], override=False) await send(message) _mocked_receive = None _request_receive = None + # begin the transaction before capturing the body to get that time accounted + trace_parent = TraceParent.from_headers(dict(Headers(scope=scope))) + self.client.begin_transaction("request", trace_parent=trace_parent) + if self.client.config.capture_body != "off": # When we consume the body from receive, we replace the streaming @@ -234,9 +240,6 @@ async def _request_started(self, request: Request) -> None: if self.client.config.capture_body != "off": await get_body(request) - trace_parent = TraceParent.from_headers(dict(request.headers)) - self.client.begin_transaction("request", trace_parent=trace_parent) - await set_context(lambda: get_data_from_request(request, self.client.config, constants.TRANSACTION), "request") transaction_name = self.get_route_name(request) or request.url.path elasticapm.set_transaction_name("{} {}".format(request.method, transaction_name), override=False) diff --git a/elasticapm/handlers/logging.py b/elasticapm/handlers/logging.py index 0738df16d..96718d2db 100644 --- a/elasticapm/handlers/logging.py +++ b/elasticapm/handlers/logging.py @@ -47,11 +47,11 @@ class LoggingHandler(logging.Handler): def __init__(self, *args, **kwargs) -> None: warnings.warn( - "The LoggingHandler will be deprecated in v7.0 of the agent. " - "Please use `log_ecs_reformatting` and ship the logs with Elastic " - "Agent or Filebeat instead. " + "The LoggingHandler is deprecated and will be removed in v7.0 of " + "the agent. Please use `log_ecs_reformatting` and ship the logs " + "with Elastic Agent or Filebeat instead. 
" "https://www.elastic.co/guide/en/apm/agent/python/current/logs.html", - PendingDeprecationWarning, + DeprecationWarning, ) self.client = None if "client" in kwargs: @@ -66,12 +66,9 @@ def __init__(self, *args, **kwargs) -> None: if client_cls: self.client = client_cls(*args, **kwargs) else: - # In 6.0, this should raise a ValueError warnings.warn( - "LoggingHandler requires a Client instance. No Client was " - "received. This will result in an error starting in v6.0 " - "of the agent", - PendingDeprecationWarning, + "LoggingHandler requires a Client instance. No Client was received.", + DeprecationWarning, ) self.client = Client(*args, **kwargs) logging.Handler.__init__(self, level=kwargs.get("level", logging.NOTSET)) @@ -194,6 +191,16 @@ class LoggingFilter(logging.Filter): automatically. """ + def __init__(self, name=""): + super().__init__(name=name) + warnings.warn( + "The LoggingFilter is deprecated and will be removed in v7.0 of " + "the agent. On Python 3.2+, by default we add a LogRecordFactory to " + "your root logger automatically" + "https://www.elastic.co/guide/en/apm/agent/python/current/logs.html", + DeprecationWarning, + ) + def filter(self, record): """ Add elasticapm attributes to `record`. 
diff --git a/elasticapm/instrumentation/packages/asyncio/asyncpg.py b/elasticapm/instrumentation/packages/asyncio/asyncpg.py index e76dd2bf1..4dcf93311 100644 --- a/elasticapm/instrumentation/packages/asyncio/asyncpg.py +++ b/elasticapm/instrumentation/packages/asyncio/asyncpg.py @@ -55,14 +55,17 @@ class AsyncPGInstrumentation(AsyncAbstractInstrumentedModule): ("asyncpg.protocol.protocol", "Protocol.copy_out"), ] - def get_query(self, method, args): + def get_query(self, method, args, kwargs=None): if method in ["Protocol.query", "Protocol.copy_in", "Protocol.copy_out"]: return args[0] - else: + elif args: return args[0].query + else: + # asyncpg>=0.29 pass data as kwargs + return kwargs["state"].query async def call(self, module, method, wrapped, instance, args, kwargs): - query = self.get_query(method, args) + query = self.get_query(method, args, kwargs) name = extract_signature(query) sql_string = shorten(query, string_length=10000) context = {"db": {"type": "sql", "statement": sql_string}} diff --git a/elasticapm/instrumentation/packages/asyncio/dbapi2_asyncio.py b/elasticapm/instrumentation/packages/asyncio/dbapi2_asyncio.py new file mode 100644 index 000000000..078204bb0 --- /dev/null +++ b/elasticapm/instrumentation/packages/asyncio/dbapi2_asyncio.py @@ -0,0 +1,126 @@ +# BSD 3-Clause License +# +# Copyright (c) 2019, Elasticsearch BV +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. 
+# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Provides classes to instrument dbapi2 providers + +https://www.python.org/dev/peps/pep-0249/ +""" + +import wrapt + +from elasticapm.contrib.asyncio.traces import async_capture_span +from elasticapm.instrumentation.packages.asyncio.base import AsyncAbstractInstrumentedModule +from elasticapm.instrumentation.packages.dbapi2 import EXEC_ACTION, QUERY_ACTION +from elasticapm.utils.encoding import shorten + + +class AsyncCursorProxy(wrapt.ObjectProxy): + provider_name = None + DML_QUERIES = ("INSERT", "DELETE", "UPDATE") + + def __init__(self, wrapped, destination_info=None): + super(AsyncCursorProxy, self).__init__(wrapped) + self._self_destination_info = destination_info or {} + + async def callproc(self, procname, params=None): + return await self._trace_sql(self.__wrapped__.callproc, procname, params, action=EXEC_ACTION) + + async def execute(self, sql, params=None): + return await self._trace_sql(self.__wrapped__.execute, sql, params) + + async def executemany(self, sql, param_list): + 
return await self._trace_sql(self.__wrapped__.executemany, sql, param_list) + + def _bake_sql(self, sql): + """ + Method to turn the "sql" argument into a string. Most database backends simply return + the given object, as it is already a string + """ + return sql + + async def _trace_sql(self, method, sql, params, action=QUERY_ACTION, **kwargs): + sql_string = self._bake_sql(sql) + if action == EXEC_ACTION: + signature = sql_string + "()" + else: + signature = self.extract_signature(sql_string) + + # Truncate sql_string to 10000 characters to prevent large queries from + # causing an error to APM server. + sql_string = shorten(sql_string, string_length=10000) + + async with async_capture_span( + signature, + span_type="db", + span_subtype=self.provider_name, + span_action=action, + extra={ + "db": {"type": "sql", "statement": sql_string, "instance": getattr(self, "_self_database", None)}, + "destination": self._self_destination_info, + }, + skip_frames=1, + leaf=True, + ) as span: + if params is None: + result = await method(sql, **kwargs) + else: + result = await method(sql, params, **kwargs) + # store "rows affected", but only for DML queries like insert/update/delete + if span and self.rowcount not in (-1, None) and signature.startswith(self.DML_QUERIES): + span.update_context("db", {"rows_affected": self.rowcount}) + return result + + def extract_signature(self, sql): + raise NotImplementedError() + + +class AsyncConnectionProxy(wrapt.ObjectProxy): + cursor_proxy = AsyncCursorProxy + + def __init__(self, wrapped, destination_info=None): + super(AsyncConnectionProxy, self).__init__(wrapped) + self._self_destination_info = destination_info + + def cursor(self, *args, **kwargs): + return self.cursor_proxy(self.__wrapped__.cursor(*args, **kwargs), self._self_destination_info) + + +class AsyncDbApi2Instrumentation(AsyncAbstractInstrumentedModule): + connect_method = None + + async def call(self, module, method, wrapped, instance, args, kwargs): + return 
AsyncConnectionProxy(await wrapped(*args, **kwargs)) + + async def call_if_sampling(self, module, method, wrapped, instance, args, kwargs): + # Contrasting to the superclass implementation, we *always* want to + # return a proxied connection, even if there is no ongoing elasticapm + # transaction yet. This ensures that we instrument the cursor once + # the transaction started. + return await self.call(module, method, wrapped, instance, args, kwargs) diff --git a/elasticapm/instrumentation/packages/asyncio/psycopg_async.py b/elasticapm/instrumentation/packages/asyncio/psycopg_async.py new file mode 100644 index 000000000..dda6db1dd --- /dev/null +++ b/elasticapm/instrumentation/packages/asyncio/psycopg_async.py @@ -0,0 +1,113 @@ +# BSD 3-Clause License +# +# Copyright (c) 2019, Elasticsearch BV +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +from __future__ import absolute_import + +from elasticapm.contrib.asyncio.traces import async_capture_span +from elasticapm.instrumentation.packages.asyncio.dbapi2_asyncio import ( + AsyncConnectionProxy, + AsyncCursorProxy, + AsyncDbApi2Instrumentation, +) +from elasticapm.instrumentation.packages.dbapi2 import extract_signature +from elasticapm.instrumentation.packages.psycopg2 import get_destination_info + + +class PGAsyncCursorProxy(AsyncCursorProxy): + provider_name = "postgresql" + + def _bake_sql(self, sql): + # If this is a Composable object, use its `as_string` method. 
+ # See https://www.psycopg.org/psycopg3/docs/api/sql.html + if hasattr(sql, "as_string"): + sql = sql.as_string(self.__wrapped__) + # If the sql string is already a byte string, we need to decode it using the connection encoding + if isinstance(sql, bytes): + sql = sql.decode(self.connection.info.encoding) + return sql + + def extract_signature(self, sql): + return extract_signature(sql) + + async def execute(self, query, params=None, **kwargs): + return await self._trace_sql(self.__wrapped__.execute, query, params, **kwargs) + + async def executemany(self, query, params_seq, **kwargs): + return await self._trace_sql(self.__wrapped__.executemany, query, params_seq, **kwargs) + + async def __aenter__(self): + return PGAsyncCursorProxy(await self.__wrapped__.__aenter__(), destination_info=self._self_destination_info) + + async def __aexit__(self, *args): + return PGAsyncCursorProxy(await self.__wrapped__.__aexit__(*args), destination_info=self._self_destination_info) + + def __aiter__(self): + return self.__wrapped__.__aiter__() + + @property + def _self_database(self): + return self.connection.info.dbname or "" + + +class PGAsyncConnectionProxy(AsyncConnectionProxy): + cursor_proxy = PGAsyncCursorProxy + + async def __aenter__(self): + return PGAsyncConnectionProxy(await self.__wrapped__.__aenter__(), destination_info=self._self_destination_info) + + async def __aexit__(self, *args): + return PGAsyncConnectionProxy( + await self.__wrapped__.__aexit__(*args), destination_info=self._self_destination_info + ) + + +class AsyncPsycopgInstrumentation(AsyncDbApi2Instrumentation): + name = "psycopg_async" + + instrument_list = [("psycopg.connection_async", "AsyncConnection.connect")] + + async def call(self, module, method, wrapped, instance, args, kwargs): + signature = "psycopg.connect_async" + + host, port = get_destination_info(kwargs.get("host"), kwargs.get("port")) + database = kwargs.get("dbname") + signature = f"{signature} {host}:{port}" # noqa: E231 + 
destination_info = { + "address": host, + "port": port, + } + async with async_capture_span( + signature, + span_type="db", + span_subtype="postgresql", + span_action="connect", + leaf=True, + extra={"destination": destination_info, "db": {"type": "sql", "instance": database}}, + ): + return PGAsyncConnectionProxy(await wrapped(*args, **kwargs), destination_info=destination_info) diff --git a/elasticapm/instrumentation/packages/azure.py b/elasticapm/instrumentation/packages/azure.py index 4200bb42b..934dbb17b 100644 --- a/elasticapm/instrumentation/packages/azure.py +++ b/elasticapm/instrumentation/packages/azure.py @@ -300,9 +300,12 @@ def handle_azuretable(request, hostname, path, query_params, service, service_ty account_name = hostname.split(".")[0] method = request.method body = request.body - try: - body = json.loads(body) - except json.decoder.JSONDecodeError: # str not bytes + if body: + try: + body = json.loads(body) + except json.decoder.JSONDecodeError: # str not bytes + body = {} + else: body = {} # /tablename(PartitionKey='',RowKey='') resource_name = path.split("/", 1)[1] if "/" in path else path @@ -313,7 +316,7 @@ def handle_azuretable(request, hostname, path, query_params, service, service_ty } operation_name = "Unknown" - if method.lower() == "put": + if method.lower() == "put" or method.lower() == "patch": operation_name = "Update" if "properties" in query_params.get("comp", []): operation_name = "SetProperties" diff --git a/elasticapm/instrumentation/packages/dbapi2.py b/elasticapm/instrumentation/packages/dbapi2.py index 8315bd8be..d903d41f8 100644 --- a/elasticapm/instrumentation/packages/dbapi2.py +++ b/elasticapm/instrumentation/packages/dbapi2.py @@ -34,6 +34,7 @@ """ import re +import string import wrapt @@ -76,8 +77,8 @@ def _scan_for_table_with_tokens(tokens, keyword): def tokenize(sql): - # split on anything that is not a word character, excluding dots - return [t for t in re.split(r"([^\w.])", sql) if t != ""] + # split on anything 
that is not a word character or a square bracket, excluding dots + return [t for t in re.split(r"([^\w.\[\]])", sql) if t != ""] def scan(tokens): @@ -85,6 +86,7 @@ def scan(tokens): literal_started = None prev_was_escape = False lexeme = [] + digits = set(string.digits) i = 0 while i < len(tokens): @@ -114,6 +116,11 @@ def scan(tokens): literal_start_idx = i literal_started = token elif token == "$": + # exclude query parameters that have a digit following the dollar + if True and len(tokens) > i + 1 and tokens[i + 1] in digits: + yield i, token + i += 1 + continue # Postgres can use arbitrary characters between two $'s as a # literal separation token, e.g.: $fish$ literal $fish$ # This part will detect that and skip over the literal. @@ -170,30 +177,46 @@ def extract_signature(sql): keyword = "INTO" if sql_type == "INSERT" else "FROM" sql_type = sql_type + " " + keyword - table_name = look_for_table(sql, keyword) + object_name = look_for_table(sql, keyword) elif sql_type in ["CREATE", "DROP"]: # 2nd word is part of SQL type sql_type = sql_type + sql[first_space:second_space] - table_name = "" + object_name = "" elif sql_type == "UPDATE": - table_name = look_for_table(sql, "UPDATE") + object_name = look_for_table(sql, "UPDATE") elif sql_type == "SELECT": # Name is first table try: sql_type = "SELECT FROM" - table_name = look_for_table(sql, "FROM") + object_name = look_for_table(sql, "FROM") except Exception: - table_name = "" + object_name = "" + elif sql_type in ["EXEC", "EXECUTE"]: + sql_type = "EXECUTE" + end = second_space if second_space > first_space else len(sql) + object_name = sql[first_space + 1 : end] + elif sql_type == "CALL": + first_paren = sql.find("(", first_space) + end = first_paren if first_paren > first_space else len(sql) + procedure_name = sql[first_space + 1 : end].rstrip(";") + object_name = procedure_name + "()" else: # No name - table_name = "" + object_name = "" - signature = " ".join(filter(bool, [sql_type, table_name])) + signature = " 
".join(filter(bool, [sql_type, object_name])) return signature QUERY_ACTION = "query" EXEC_ACTION = "exec" +PROCEDURE_STATEMENTS = ["EXEC", "EXECUTE", "CALL"] + + +def extract_action_from_signature(signature, default): + if signature.split(" ")[0] in PROCEDURE_STATEMENTS: + return EXEC_ACTION + return default class CursorProxy(wrapt.ObjectProxy): @@ -220,12 +243,13 @@ def _bake_sql(self, sql): """ return sql - def _trace_sql(self, method, sql, params, action=QUERY_ACTION): + def _trace_sql(self, method, sql, params, action=QUERY_ACTION, **kwargs): sql_string = self._bake_sql(sql) if action == EXEC_ACTION: signature = sql_string + "()" else: signature = self.extract_signature(sql_string) + action = extract_action_from_signature(signature, action) # Truncate sql_string to 10000 characters to prevent large queries from # causing an error to APM server. @@ -244,9 +268,9 @@ def _trace_sql(self, method, sql, params, action=QUERY_ACTION): leaf=True, ) as span: if params is None: - result = method(sql) + result = method(sql, **kwargs) else: - result = method(sql, params) + result = method(sql, params, **kwargs) # store "rows affected", but only for DML queries like insert/update/delete if span and self.rowcount not in (-1, None) and signature.startswith(self.DML_QUERIES): span.update_context("db", {"rows_affected": self.rowcount}) diff --git a/elasticapm/instrumentation/packages/kafka.py b/elasticapm/instrumentation/packages/kafka.py index c3bc2d64d..ab9ebd1a4 100644 --- a/elasticapm/instrumentation/packages/kafka.py +++ b/elasticapm/instrumentation/packages/kafka.py @@ -143,7 +143,8 @@ def call(self, module, method, wrapped, instance, args, kwargs): try: result = wrapped(*args, **kwargs) except StopIteration: - span.cancel() + if span: + span.cancel() raise if span and not isinstance(span, DroppedSpan): topic = result[0] diff --git a/elasticapm/instrumentation/packages/mysql_connector.py b/elasticapm/instrumentation/packages/mysql_connector.py index a411af1c0..6b82ce4a8 
100644 --- a/elasticapm/instrumentation/packages/mysql_connector.py +++ b/elasticapm/instrumentation/packages/mysql_connector.py @@ -46,9 +46,8 @@ def extract_signature(self, sql): @property def _self_database(self) -> str: - # for unknown reasons, the connection is available as the `_connection` attribute on Python 3.6, - # and as `_cnx` on later Python versions - connection = getattr(self, "_cnx") or getattr(self, "_connection") + # it looks like the connection is available as the `_connection` or as `_cnx` depending on Python versions + connection = getattr(self, "_connection", None) or getattr(self, "_cnx", None) return connection.database if connection else "" diff --git a/elasticapm/instrumentation/packages/psycopg.py b/elasticapm/instrumentation/packages/psycopg.py index 3dbcf5a0a..0d79cf686 100644 --- a/elasticapm/instrumentation/packages/psycopg.py +++ b/elasticapm/instrumentation/packages/psycopg.py @@ -55,6 +55,12 @@ def _bake_sql(self, sql): def extract_signature(self, sql): return extract_signature(sql) + def execute(self, query, params=None, **kwargs): + return self._trace_sql(self.__wrapped__.execute, query, params, **kwargs) + + def executemany(self, query, params_seq, **kwargs): + return self._trace_sql(self.__wrapped__.executemany, query, params_seq, **kwargs) + def __enter__(self): return PGCursorProxy(self.__wrapped__.__enter__(), destination_info=self._self_destination_info) diff --git a/elasticapm/instrumentation/packages/psycopg2.py b/elasticapm/instrumentation/packages/psycopg2.py index a058597da..850d849e3 100644 --- a/elasticapm/instrumentation/packages/psycopg2.py +++ b/elasticapm/instrumentation/packages/psycopg2.py @@ -59,6 +59,12 @@ def _bake_sql(self, sql): def extract_signature(self, sql): return extract_signature(sql) + def execute(self, query, vars=None): + return self._trace_sql(self.__wrapped__.execute, query, vars) + + def executemany(self, query, vars_list): + return self._trace_sql(self.__wrapped__.executemany, query, 
vars_list) + def __enter__(self): return PGCursorProxy(self.__wrapped__.__enter__(), destination_info=self._self_destination_info) diff --git a/elasticapm/instrumentation/packages/tornado.py b/elasticapm/instrumentation/packages/tornado.py index 2f0ed04a0..c1f829bc1 100644 --- a/elasticapm/instrumentation/packages/tornado.py +++ b/elasticapm/instrumentation/packages/tornado.py @@ -71,7 +71,9 @@ async def call(self, module, method, wrapped, instance, args, kwargs): client = instance.application.elasticapm_client should_ignore = client.should_ignore_url(request.path) if not should_ignore: - trace_parent = TraceParent.from_headers(request.headers) + # In tornado 6.5.3 the __in__ protocol for the headers is case-sensitive so we need to normalize them + normalized_headers = {k.lower(): v for k, v in request.headers.items()} + trace_parent = TraceParent.from_headers(normalized_headers) client.begin_transaction("request", trace_parent=trace_parent) elasticapm.set_context( lambda: get_data_from_request(instance, request, client.config, constants.TRANSACTION), "request" diff --git a/elasticapm/instrumentation/packages/urllib.py b/elasticapm/instrumentation/packages/urllib.py index b40932a55..2b0dae16e 100644 --- a/elasticapm/instrumentation/packages/urllib.py +++ b/elasticapm/instrumentation/packages/urllib.py @@ -97,10 +97,9 @@ def call(self, module, method, wrapped, instance, args, kwargs): leaf_span.dist_tracing_propagated = True response = wrapped(*args, **kwargs) if response: - status = getattr(response, "status", None) or response.getcode() # Python 2 compat if span.context: - span.context["http"]["status_code"] = status - span.set_success() if status < 400 else span.set_failure() + span.context["http"]["status_code"] = response.status + span.set_success() if response.status < 400 else span.set_failure() return response def mutate_unsampled_call_args(self, module, method, wrapped, instance, args, kwargs, transaction): diff --git 
a/elasticapm/instrumentation/packages/urllib3.py b/elasticapm/instrumentation/packages/urllib3.py index cc7206e83..93d9c3392 100644 --- a/elasticapm/instrumentation/packages/urllib3.py +++ b/elasticapm/instrumentation/packages/urllib3.py @@ -61,12 +61,7 @@ def update_headers(args, kwargs, instance, transaction, trace_parent): :param trace_parent: the TraceParent object :return: an (args, kwargs) tuple """ - from urllib3._version import __version__ as urllib3_version - - if urllib3_version.startswith("2") and len(args) >= 5 and args[4]: - headers = args[4].copy() - args = tuple(itertools.chain((args[:4]), (headers,), args[5:])) - elif len(args) >= 4 and args[3]: + if len(args) >= 4 and args[3]: headers = args[3].copy() args = tuple(itertools.chain((args[:3]), (headers,), args[4:])) elif "headers" in kwargs and kwargs["headers"]: diff --git a/elasticapm/instrumentation/register.py b/elasticapm/instrumentation/register.py index 2c3ab71a9..b37aff1e9 100644 --- a/elasticapm/instrumentation/register.py +++ b/elasticapm/instrumentation/register.py @@ -94,6 +94,7 @@ "elasticapm.instrumentation.packages.asyncio.starlette.StarletteServerErrorMiddlewareInstrumentation", "elasticapm.instrumentation.packages.asyncio.redis_asyncio.RedisAsyncioInstrumentation", "elasticapm.instrumentation.packages.asyncio.redis_asyncio.RedisPipelineInstrumentation", + "elasticapm.instrumentation.packages.asyncio.psycopg_async.AsyncPsycopgInstrumentation", "elasticapm.instrumentation.packages.grpc.GRPCAsyncServerInstrumentation", ] ) diff --git a/elasticapm/traces.py b/elasticapm/traces.py index 583505070..929458d7a 100644 --- a/elasticapm/traces.py +++ b/elasticapm/traces.py @@ -531,7 +531,7 @@ def __init__( span_subtype: Optional[str] = None, span_action: Optional[str] = None, sync: Optional[bool] = None, - start: Optional[int] = None, + start: Optional[float] = None, links: Optional[Sequence[TraceParent]] = None, ) -> None: """ @@ -1044,7 +1044,7 @@ def __init__( labels: Optional[dict] = None, 
span_subtype: Optional[str] = None, span_action: Optional[str] = None, - start: Optional[int] = None, + start: Optional[float] = None, duration: Optional[Union[float, timedelta]] = None, sync: Optional[bool] = None, links: Optional[Sequence[TraceParent]] = None, diff --git a/elasticapm/transport/base.py b/elasticapm/transport/base.py index 487e13a93..b81960907 100644 --- a/elasticapm/transport/base.py +++ b/elasticapm/transport/base.py @@ -250,6 +250,7 @@ def _flush(self, buffer, forced_flush=False) -> None: """ if not self.state.should_try(): logger.error("dropping flushed data due to transport failure back-off") + buffer.close() else: fileobj = buffer.fileobj # get a reference to the fileobj before closing the gzip file buffer.close() @@ -261,6 +262,8 @@ def _flush(self, buffer, forced_flush=False) -> None: except Exception as e: self.handle_transport_fail(e) + data.release() + def start_thread(self, pid=None) -> None: super(Transport, self).start_thread(pid=pid) if (not self._thread or self.pid != self._thread.pid) and not self._closed: @@ -292,7 +295,8 @@ def close(self) -> None: if not self._flushed.wait(timeout=self._max_flush_time_seconds): logger.error("Closing the transport connection timed out.") - stop_thread = close + def stop_thread(self) -> None: + self.close() def flush(self): """ diff --git a/elasticapm/transport/http.py b/elasticapm/transport/http.py index 17ae50ba3..cd960e40f 100644 --- a/elasticapm/transport/http.py +++ b/elasticapm/transport/http.py @@ -32,6 +32,7 @@ import hashlib import json +import os import re import ssl import urllib.parse @@ -202,7 +203,11 @@ def _get_cache_control_max_age(self, response_headers): def _process_queue(self) -> None: if not self.client.server_version: - self.fetch_server_info() + # this is useful on aws lambda environments where this call incurs in unwanted latency + if self.client.config.skip_server_info: + logger.debug("Skipping to fetch server info") + else: + self.fetch_server_info() 
super()._process_queue() def fetch_server_info(self) -> None: @@ -250,6 +255,23 @@ def ca_certs(self): return self._server_ca_cert_file return certifi.where() if (certifi and self.client.config.use_certifi) else None + def close(self): + """ + Take care of being able to shutdown cleanly + :return: + """ + if self._closed or (not self._thread or self._thread.pid != os.getpid()): + return + + self._closed = True + # we are racing against urllib3 ConnectionPool weakref finalizer that would lead to having them closed + # and we hanging waiting for send any eventual queued data + # Force the creation of a new PoolManager so that we are always able to flush + self._http = None + self.queue("close", None) + if not self._flushed.wait(timeout=self._max_flush_time_seconds): + logger.error("Closing the transport connection timed out.") + def version_string_to_tuple(version): if version: diff --git a/elasticapm/utils/__init__.py b/elasticapm/utils/__init__.py index 58a302960..4403f5abd 100644 --- a/elasticapm/utils/__init__.py +++ b/elasticapm/utils/__init__.py @@ -33,20 +33,14 @@ import re import socket import urllib.parse -from functools import partial +from functools import partial, partialmethod from types import FunctionType from typing import Pattern from elasticapm.conf import constants from elasticapm.utils import encoding -try: - from functools import partialmethod - - partial_types = (partial, partialmethod) -except ImportError: - # Python 2 - partial_types = (partial,) +partial_types = (partial, partialmethod) default_ports = {"https": 443, "http": 80, "postgresql": 5432, "mysql": 3306, "mssql": 1433} @@ -84,6 +78,8 @@ def get_name_from_func(func: FunctionType) -> str: return "partial({})".format(get_name_from_func(func.func)) elif hasattr(func, "_partialmethod") and hasattr(func._partialmethod, "func"): return "partial({})".format(get_name_from_func(func._partialmethod.func)) + elif hasattr(func, "__partialmethod__") and hasattr(func.__partialmethod__, "func"): + 
return "partial({})".format(get_name_from_func(func.__partialmethod__.func)) module = func.__module__ diff --git a/elasticapm/utils/encoding.py b/elasticapm/utils/encoding.py index 4455f2685..fedaa8d63 100644 --- a/elasticapm/utils/encoding.py +++ b/elasticapm/utils/encoding.py @@ -36,6 +36,11 @@ import uuid from decimal import Decimal +try: + from django.db.models import QuerySet as DjangoQuerySet +except ImportError: + DjangoQuerySet = None + from elasticapm.conf.constants import KEYWORD_MAX_LENGTH, LABEL_RE, LABEL_TYPES, LONG_FIELD_MAX_LENGTH PROTECTED_TYPES = (int, type(None), float, Decimal, datetime.datetime, datetime.date, datetime.time) @@ -144,6 +149,14 @@ class value_type(list): ret = float(value) elif isinstance(value, int): ret = int(value) + elif ( + DjangoQuerySet is not None + and isinstance(value, DjangoQuerySet) + and getattr(value, "_result_cache", True) is None + ): + # if we have a Django QuerySet a None result cache it may mean that the underlying query failed + # so represent it as unevaluated instead of retrying the query again + ret = "<%s `unevaluated`>" % (value.__class__.__name__) elif value is not None: try: ret = transform(repr(value)) diff --git a/elasticapm/utils/json_encoder.py b/elasticapm/utils/json_encoder.py index c40e0accd..3918bb233 100644 --- a/elasticapm/utils/json_encoder.py +++ b/elasticapm/utils/json_encoder.py @@ -31,13 +31,9 @@ import datetime import decimal +import json import uuid -try: - import json -except ImportError: - import simplejson as json - class BetterJSONEncoder(json.JSONEncoder): ENCODERS = { diff --git a/elasticapm/utils/simplejson_encoder.py b/elasticapm/utils/simplejson_encoder.py new file mode 100644 index 000000000..f538ffdac --- /dev/null +++ b/elasticapm/utils/simplejson_encoder.py @@ -0,0 +1,58 @@ +# BSD 3-Clause License +# +# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details +# Copyright (c) 2019, Elasticsearch BV +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + + +import simplejson as json + +from elasticapm.utils.json_encoder import BetterJSONEncoder + + +class BetterSimpleJSONEncoder(json.JSONEncoder): + ENCODERS = BetterJSONEncoder.ENCODERS + + def default(self, obj): + if type(obj) in self.ENCODERS: + return self.ENCODERS[type(obj)](obj) + try: + return super(BetterSimpleJSONEncoder, self).default(obj) + except TypeError: + return str(obj) + + +def better_decoder(data): + return data + + +def dumps(value, **kwargs): + return json.dumps(value, cls=BetterSimpleJSONEncoder, ignore_nan=True, **kwargs) + + +def loads(value, 
**kwargs): + return json.loads(value, object_hook=better_decoder) diff --git a/elasticapm/version.py b/elasticapm/version.py index e817f4ef1..c55a11c61 100644 --- a/elasticapm/version.py +++ b/elasticapm/version.py @@ -28,5 +28,5 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -__version__ = (6, 19, 0) +__version__ = (6, 25, 0) VERSION = ".".join(map(str, __version__)) diff --git a/pyproject.toml b/pyproject.toml index 167532517..019a7b666 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,6 @@ exclude = ''' | _build | build | dist - | elasticapm/utils/wrapt # The following are specific to Black, you probably don't want those. | blib2to3 diff --git a/scripts/run-tests.bat b/scripts/run-tests.bat index 0fce63000..f17e57d9c 100644 --- a/scripts/run-tests.bat +++ b/scripts/run-tests.bat @@ -9,14 +9,12 @@ set VENV_PYTHON=%cd%\venv\Scripts\ set COVERAGE_FILE=.coverage.windows.%VERSION%.%FRAMEWORK%.%ASYNCIO% -set IGNORE_PYTHON3_WITH_PYTHON2= -if "%VERSION%" == "2.7" set IGNORE_PYTHON3_WITH_PYTHON2=--ignore-glob="*\py3_*.py" set PYTEST_JUNIT="--junitxml=.\tests\windows-%VERSION%-%FRAMEWORK%-%ASYNCIO%-python-agent-junit.xml" if "%ASYNCIO%" == "true" ( - %VENV_PYTHON%\python.exe -m pytest %PYTEST_JUNIT% %IGNORE_PYTHON3_WITH_PYTHON2% --cov --cov-context=test --cov-branch --cov-config=setup.cfg -m "not integrationtest" || exit /b 1 + %VENV_PYTHON%\python.exe -m pytest %PYTEST_JUNIT% --cov --cov-context=test --cov-branch --cov-config=setup.cfg -m "not integrationtest" || exit /b 1 ) if "%ASYNCIO%" == "false" ( - %VENV_PYTHON%\python.exe -m pytest %PYTEST_JUNIT% --ignore-glob="*\asyncio*\*" %IGNORE_PYTHON3_WITH_PYTHON2% --cov --cov-context=test --cov-branch --cov-config=setup.cfg -m "not integrationtest" || exit /b 1 + %VENV_PYTHON%\python.exe -m pytest %PYTEST_JUNIT% --ignore-glob="*\asyncio*\*" --cov --cov-context=test --cov-branch --cov-config=setup.cfg -m "not 
integrationtest" || exit /b 1 ) call %VENV_PYTHON%\python.exe setup.py bdist_wheel diff --git a/setup.cfg b/setup.cfg index ce33450a6..0a56a9b01 100644 --- a/setup.cfg +++ b/setup.cfg @@ -22,6 +22,7 @@ classifiers = Programming Language :: Python :: 3.10 Programming Language :: Python :: 3.11 Programming Language :: Python :: 3.12 + Programming Language :: Python :: 3.13 Programming Language :: Python :: Implementation :: CPython Programming Language :: Python :: Implementation :: PyPy License :: OSI Approved :: BSD License @@ -39,7 +40,7 @@ zip_safe = false install_requires = urllib3!=2.0.0,<3.0.0 certifi - wrapt>=1.14.1,<1.15.0 # https://github.com/elastic/apm-agent-python/issues/1894 + wrapt>=1.14.1,!=1.15.0 # https://github.com/elastic/apm-agent-python/issues/1894 ecs_logging test_suite=tests @@ -111,6 +112,7 @@ markers = aiobotocore kafka grpc + azurestorage addopts=--random-order [isort] diff --git a/setup.py b/setup.py index 23ebec33e..ffb038307 100644 --- a/setup.py +++ b/setup.py @@ -40,48 +40,19 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-# Hack to prevent stupid "TypeError: 'NoneType' object is not callable" error -# in multiprocessing/util.py _exit_function when running `python -# setup.py test` (see -# http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html) -for m in ("multiprocessing", "billiard"): - try: - __import__(m) - except ImportError: - pass - import ast -import codecs import os -import sys -from distutils.command.build_ext import build_ext -from distutils.errors import CCompilerError, DistutilsExecError, DistutilsPlatformError - -import pkg_resources -from setuptools import Extension, setup -from setuptools.command.test import test as TestCommand - -pkg_resources.require("setuptools>=39.2") - - -class PyTest(TestCommand): - user_options = [("pytest-args=", "a", "Arguments to pass to py.test")] - def initialize_options(self) -> None: - TestCommand.initialize_options(self) - self.pytest_args = [] +from setuptools import setup - def finalize_options(self) -> None: - TestCommand.finalize_options(self) - self.test_args = [] - self.test_suite = True +try: + import importlib.metadata - def run_tests(self) -> None: - # import here, cause outside the eggs aren't loaded - import pytest + importlib.metadata.requires("setuptools") +except ImportError: + import pkg_resources - errno = pytest.main(self.pytest_args) - sys.exit(errno) + pkg_resources.require("setuptools") def get_version(): @@ -96,12 +67,16 @@ def get_version(): :return: a string, indicating the version """ - version_file = codecs.open(os.path.join("elasticapm", "version.py"), encoding="utf-8") + version_file = open(os.path.join("elasticapm", "version.py"), encoding="utf-8") for line in version_file: if line.startswith("__version__"): version_tuple = ast.literal_eval(line.split(" = ")[1]) - return ".".join(map(str, version_tuple)) + version_str = ".".join(map(str, version_tuple)) + post_version = os.getenv("ELASTIC_CI_POST_VERSION") + if post_version: + return f"{version_str}.post{post_version}" + return version_str return 
"unknown" -setup(cmdclass={"test": PyTest}, version=get_version()) +setup(version=get_version()) diff --git a/tests/Dockerfile b/tests/Dockerfile index cf1a8e30b..c5cd8050a 100644 --- a/tests/Dockerfile +++ b/tests/Dockerfile @@ -44,4 +44,7 @@ RUN chmod +x /usr/local/bin/entrypoint.sh WORKDIR /app +# configure the label to help with the GitHub container registry +LABEL org.opencontainers.image.source https://github.com/elastic/apm-agent-python + ENTRYPOINT ["/usr/local/bin/entrypoint.sh"] diff --git a/tests/client/client_tests.py b/tests/client/client_tests.py index 6cec88205..e42ada12c 100644 --- a/tests/client/client_tests.py +++ b/tests/client/client_tests.py @@ -47,7 +47,13 @@ import elasticapm from elasticapm.base import Client +from elasticapm.conf import _in_fips_mode from elasticapm.conf.constants import ERROR + +try: + from elasticapm.utils.simplejson_encoder import dumps as simplejson_dumps +except ImportError: + simplejson_dumps = None from tests.fixtures import DummyTransport, TempStoreClient from tests.utils import assert_any_record_contains @@ -77,11 +83,7 @@ def test_service_info_node_name(elasticapm_client): def test_process_info(elasticapm_client): process_info = elasticapm_client.get_process_info() assert process_info["pid"] == os.getpid() - if hasattr(os, "getppid"): - assert process_info["ppid"] == os.getppid() - else: - # Windows + Python 2.7 - assert process_info["ppid"] is None + assert process_info["ppid"] == os.getppid() assert "argv" not in process_info elasticapm_client.config.update("1", include_process_args=True) with mock.patch.object(sys, "argv", ["a", "b", "c"]): @@ -232,6 +234,14 @@ def test_custom_transport(elasticapm_client): assert isinstance(elasticapm_client._transport, DummyTransport) +@pytest.mark.skipif(simplejson_dumps is None, reason="no test without simplejson") +@pytest.mark.parametrize( + "elasticapm_client", [{"transport_json_serializer": "elasticapm.utils.simplejson_encoder.dumps"}], indirect=True +) +def 
test_custom_transport_json_serializer(elasticapm_client): + assert elasticapm_client._transport._json_serializer == simplejson_dumps + + @pytest.mark.parametrize("elasticapm_client", [{"processors": []}], indirect=True) def test_empty_processor_list(elasticapm_client): assert elasticapm_client.processors == [] @@ -264,33 +274,44 @@ def test_send_remote_failover_sync(should_try, sending_elasticapm_client, caplog assert not sending_elasticapm_client._transport.state.did_fail() -@mock.patch("elasticapm.transport.http.Transport.send") -@mock.patch("elasticapm.transport.base.TransportState.should_try") -def test_send_remote_failover_sync_non_transport_exception_error(should_try, http_send, caplog): - should_try.return_value = True - +@mock.patch("elasticapm.transport.base.TransportState.should_try", return_value=True) +def test_send_remote_failover_sync_non_transport_exception_error(should_try, caplog): client = Client( server_url="http://example.com", service_name="app_name", secret_token="secret", - transport_class="elasticapm.transport.http.Transport", + transport_class="tests.fixtures.MockSendHTTPTransport", metrics_interval="0ms", metrics_sets=[], ) + # test error - http_send.side_effect = ValueError("oopsie") + client._transport.send_mock.side_effect = ValueError("oopsie") with caplog.at_level("ERROR", "elasticapm.transport"): client.capture_message("foo", handled=False) - client._transport.flush() + try: + client._transport.flush() + except ValueError: + # give flush a bit more room because we may take a bit more than the max timeout to flush + client._transport._flushed.wait(timeout=1) assert client._transport.state.did_fail() assert_any_record_contains(caplog.records, "oopsie", "elasticapm.transport") # test recovery - http_send.side_effect = None + client._transport.send_mock.side_effect = None client.capture_message("foo", handled=False) - client.close() + try: + client._transport.flush() + except ValueError: + # give flush a bit more room because we may take 
a bit more than the max timeout to flush + client._transport._flushed.wait(timeout=1) + # We have a race here with the queue where we would end up checking for did_fail before the message + # is being handled by the queue, so sleep a bit and retry to give it enough time + retries = 0 + while client._transport.state.did_fail() and retries < 3: + time.sleep(0.1) + retries += 1 assert not client._transport.state.did_fail() - client.close() @pytest.mark.parametrize("validating_httpserver", [{"skip_validate": True}], indirect=True) @@ -341,6 +362,7 @@ def test_call_end_twice(elasticapm_client): elasticapm_client.end_transaction("test-transaction", 200) +@pytest.mark.skipif(_in_fips_mode() is True, reason="cannot disable verify_server_cert in fips mode") @pytest.mark.parametrize("elasticapm_client", [{"verify_server_cert": False}], indirect=True) def test_client_disables_ssl_verification(elasticapm_client): assert not elasticapm_client.config.verify_server_cert diff --git a/tests/config/tests.py b/tests/config/tests.py index b69a0fe1d..284f5694a 100644 --- a/tests/config/tests.py +++ b/tests/config/tests.py @@ -39,13 +39,17 @@ import mock import pytest +import elasticapm.conf from elasticapm.conf import ( Config, ConfigurationError, EnumerationValidator, + ExcludeRangeValidator, FileIsReadableValidator, PrecisionValidator, RegexValidator, + SupportedValueInFipsModeValidator, + UnitValidator, VersionedConfig, _BoolConfigValue, _ConfigBase, @@ -276,7 +280,10 @@ def test_file_is_readable_validator_not_a_file(tmpdir): assert "is not a file" in e.value.args[0] -@pytest.mark.skipif(platform.system() == "Windows", reason="os.access() doesn't seem to work as we expect on Windows") +@pytest.mark.skipif( + platform.system() == "Windows" or os.getuid() == 0, + reason="os.access() doesn't seem to work as we expect on Windows and test will fail as root user", +) def test_file_is_readable_validator_not_readable(tmpdir): p = tmpdir.join("nonreadable") p.write("") @@ -450,3 +457,71 @@ 
def test_config_all_upper_case(): if not isinstance(config_value, _ConfigValue): continue assert config_value.env_key == config_value.env_key.upper() + + +def test_regex_validator_without_match(): + validator = RegexValidator(r"\d") + with pytest.raises(ConfigurationError) as e: + validator("foo", "field") + assert "does not match pattern" in e.value.args[0] + + +def test_unit_validator_without_match(): + validator = RegexValidator("ms") + with pytest.raises(ConfigurationError) as e: + validator("s", "field") + assert "does not match pattern" in e.value.args[0] + + +def test_unit_validator_with_unsupported_unit(): + validator = UnitValidator(r"(\d+)(s)", "secs", {}) + with pytest.raises(ConfigurationError) as e: + validator("10s", "field") + assert "is not a supported unit" in e.value.args[0] + + +def test_precision_validator_not_a_float(): + validator = PrecisionValidator() + with pytest.raises(ConfigurationError) as e: + validator("notafloat", "field") + assert "is not a float" in e.value.args[0] + + +def test_exclude_range_validator_not_in_range(): + validator = ExcludeRangeValidator(1, 100, "desc") + with pytest.raises(ConfigurationError) as e: + validator(10, "field") + assert "cannot be in range" in e.value.args[0] + + +def test_supported_value_in_fips_mode_validator_in_fips_mode_with_invalid_value(monkeypatch): + monkeypatch.setattr(elasticapm.conf, "_in_fips_mode", lambda: True) + exception_message = "VERIFY_SERVER_CERT=False must be set to True if FIPS mode is enabled" + validator = SupportedValueInFipsModeValidator(supported_value=True) + with pytest.raises(ConfigurationError) as e: + validator(False, "VERIFY_SERVER_CERT") + assert exception_message == e.value.args[0] + + config = Config({"VERIFY_SERVER_CERT": False}) + assert config.errors["VERIFY_SERVER_CERT"] == exception_message + + +def test_supported_value_in_fips_mode_validator_in_fips_mode_with_valid_value(monkeypatch): + monkeypatch.setattr(elasticapm.conf, "_in_fips_mode", lambda: True) + 
validator = SupportedValueInFipsModeValidator(supported_value=True) + assert validator(True, "VERIFY_SERVER_CERT") == True + config = Config({"VERIFY_SERVER_CERT": True}) + assert config.verify_server_cert == True + assert "VERIFY_SERVER_CERT" not in config.errors + + +def test_supported_value_in_fips_mode_validator_not_in_fips_mode(monkeypatch): + monkeypatch.setattr(elasticapm.conf, "_in_fips_mode", lambda: False) + validator = SupportedValueInFipsModeValidator(supported_value=True) + assert validator(True, "field") == True + assert validator(False, "field") == False + + config = Config({"VERIFY_SERVER_CERT": False}) + assert not config.errors + config = Config({"VERIFY_SERVER_CERT": True}) + assert not config.errors diff --git a/tests/contrib/asgi/app.py b/tests/contrib/asgi/app.py index a919b2cef..352720135 100644 --- a/tests/contrib/asgi/app.py +++ b/tests/contrib/asgi/app.py @@ -59,3 +59,8 @@ async def boom() -> None: @app.route("/body") async def json(): return jsonify({"hello": "world"}) + + +@app.route("/500", methods=["GET"]) +async def error(): + return "KO", 500 diff --git a/tests/contrib/asgi/asgi_tests.py b/tests/contrib/asgi/asgi_tests.py index 824a23b68..632875e3e 100644 --- a/tests/contrib/asgi/asgi_tests.py +++ b/tests/contrib/asgi/asgi_tests.py @@ -45,7 +45,7 @@ def instrumented_app(elasticapm_client): @pytest.mark.asyncio -async def test_transaction_span(instrumented_app, elasticapm_client): +async def test_transaction_span_success(instrumented_app, elasticapm_client): async with async_asgi_testclient.TestClient(instrumented_app) as client: resp = await client.get("/") assert resp.status_code == 200 @@ -66,6 +66,47 @@ async def test_transaction_span(instrumented_app, elasticapm_client): assert span["sync"] == False +@pytest.mark.asyncio +async def test_transaction_span_failure(instrumented_app, elasticapm_client): + async with async_asgi_testclient.TestClient(instrumented_app) as client: + resp = await client.get("/500") + assert 
resp.status_code == 500 + assert resp.text == "KO" + + assert len(elasticapm_client.events[constants.TRANSACTION]) == 1 + assert len(elasticapm_client.events[constants.SPAN]) == 0 + transaction = elasticapm_client.events[constants.TRANSACTION][0] + assert transaction["name"] == "GET unknown route" + assert transaction["result"] == "HTTP 5xx" + assert transaction["outcome"] == "failure" + assert transaction["context"]["request"]["url"]["full"] == "/500" + assert transaction["context"]["response"]["status_code"] == 500 + + +@pytest.mark.asyncio +async def test_transaction_traceparent(instrumented_app, elasticapm_client): + async with async_asgi_testclient.TestClient(instrumented_app) as client: + resp = await client.get("/", headers={"traceparent": "00-12345678901234567890123456789012-1234567890123456-01"}) + assert resp.status_code == 200 + assert resp.text == "OK" + + assert len(elasticapm_client.events[constants.TRANSACTION]) == 1 + assert len(elasticapm_client.events[constants.SPAN]) == 1 + transaction = elasticapm_client.events[constants.TRANSACTION][0] + span = elasticapm_client.events[constants.SPAN][0] + assert transaction["name"] == "GET unknown route" + assert transaction["result"] == "HTTP 2xx" + assert transaction["outcome"] == "success" + assert transaction["context"]["request"]["url"]["full"] == "/" + assert transaction["context"]["response"]["status_code"] == 200 + + assert transaction["trace_id"] == "12345678901234567890123456789012" + + assert span["name"] == "sleep" + assert span["outcome"] == "success" + assert span["sync"] == False + + @pytest.mark.asyncio async def test_transaction_ignore_url(instrumented_app, elasticapm_client): elasticapm_client.config.update("1", transaction_ignore_urls="/foo*") diff --git a/tests/contrib/asyncio/starlette_tests.py b/tests/contrib/asyncio/starlette_tests.py index 5f4c070bd..055cf01ad 100644 --- a/tests/contrib/asyncio/starlette_tests.py +++ b/tests/contrib/asyncio/starlette_tests.py @@ -28,8 +28,6 @@ # OR TORT 
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -from shutil import ExecError - from tests.fixtures import TempStoreClient import pytest # isort:skip @@ -110,6 +108,10 @@ async def with_slash(request): async def without_slash(request): return PlainTextResponse("Hi {}".format(request.path_params["name"])) + @app.route("/500/", methods=["GET"]) + async def with_500_status_code(request): + return PlainTextResponse("Oops", status_code=500) + @sub.route("/hi") async def hi_from_sub(request): return PlainTextResponse("sub") @@ -236,6 +238,27 @@ def test_exception(app, elasticapm_client): assert error["context"]["request"] == transaction["context"]["request"] +def test_failure_outcome_with_500_status_code(app, elasticapm_client): + client = TestClient(app) + + client.get("/500/") + + assert len(elasticapm_client.events[constants.TRANSACTION]) == 1 + transaction = elasticapm_client.events[constants.TRANSACTION][0] + spans = elasticapm_client.spans_for_transaction(transaction) + assert len(spans) == 0 + + assert transaction["name"] == "GET /500/" + assert transaction["result"] == "HTTP 5xx" + assert transaction["outcome"] == "failure" + assert transaction["type"] == "request" + request = transaction["context"]["request"] + assert request["method"] == "GET" + assert transaction["context"]["response"]["status_code"] == 500 + + assert len(elasticapm_client.events[constants.ERROR]) == 0 + + @pytest.mark.parametrize("header_name", [constants.TRACEPARENT_HEADER_NAME, constants.TRACEPARENT_LEGACY_HEADER_NAME]) def test_traceparent_handling(app, elasticapm_client, header_name): client = TestClient(app) @@ -323,7 +346,8 @@ def test_transaction_name_is_route(app, elasticapm_client): ) def test_trailing_slash_redirect_detection(app, elasticapm_client, url, expected): client = TestClient(app) - response = client.get(url, allow_redirects=False) + kwargs = {"allow_redirects": False} if 
starlette_version_tuple < (0, 43) else {"follow_redirects": False} + response = client.get(url, **kwargs) assert response.status_code == 307 assert len(elasticapm_client.events[constants.TRANSACTION]) == 1 for transaction in elasticapm_client.events[constants.TRANSACTION]: @@ -534,3 +558,11 @@ def test_transaction_active_in_base_exception_handler(app, elasticapm_client): assert exc.transaction_id assert len(elasticapm_client.events[constants.TRANSACTION]) == 1 + + +def test_middleware_without_client_arg(): + with mock.patch.dict("os.environ", {"ELASTIC_APM_SERVICE_NAME": "foo"}): + app = Starlette() + elasticapm = ElasticAPM(app) + + assert elasticapm.client.config.service_name == "foo" diff --git a/tests/contrib/asyncio/tornado/tornado_tests.py b/tests/contrib/asyncio/tornado/tornado_tests.py index 3ce3bafed..499a28550 100644 --- a/tests/contrib/asyncio/tornado/tornado_tests.py +++ b/tests/contrib/asyncio/tornado/tornado_tests.py @@ -177,7 +177,7 @@ async def test_traceparent_handling(app, base_url, http_client): assert transaction["trace_id"] == "0af7651916cd43dd8448eb211c80319c" assert transaction["parent_id"] == "b7ad6b7169203331" - assert "foo=bar,bar=baz,baz=bazzinga" == wrapped_from_string.call_args[0][0]["TraceState"] + assert "foo=bar,bar=baz,baz=bazzinga" == wrapped_from_string.call_args[0][0]["tracestate"] @pytest.mark.gen_test diff --git a/tests/contrib/django/django_tests.py b/tests/contrib/django/django_tests.py index 547d46b51..72c791280 100644 --- a/tests/contrib/django/django_tests.py +++ b/tests/contrib/django/django_tests.py @@ -270,25 +270,7 @@ def test_user_info_with_custom_user_non_string_username(django_elasticapm_client assert user_info["username"] == "1" -@pytest.mark.skipif(django.VERSION > (1, 9), reason="MIDDLEWARE_CLASSES removed in Django 2.0") def test_user_info_with_non_django_auth(django_elasticapm_client, client): - with override_settings( - INSTALLED_APPS=[app for app in settings.INSTALLED_APPS if app != "django.contrib.auth"] - ) 
and override_settings( - MIDDLEWARE_CLASSES=[ - m for m in settings.MIDDLEWARE_CLASSES if m != "django.contrib.auth.middleware.AuthenticationMiddleware" - ] - ): - with pytest.raises(Exception): - resp = client.get(reverse("elasticapm-raise-exc")) - - assert len(django_elasticapm_client.events[ERROR]) == 1 - event = django_elasticapm_client.events[ERROR][0] - assert event["context"]["user"] == {} - - -@pytest.mark.skipif(django.VERSION < (1, 10), reason="MIDDLEWARE new in Django 1.10") -def test_user_info_with_non_django_auth_django_2(django_elasticapm_client, client): with override_settings( INSTALLED_APPS=[app for app in settings.INSTALLED_APPS if app != "django.contrib.auth"] ) and override_settings( @@ -303,22 +285,7 @@ def test_user_info_with_non_django_auth_django_2(django_elasticapm_client, clien assert event["context"]["user"] == {} -@pytest.mark.skipif(django.VERSION > (1, 9), reason="MIDDLEWARE_CLASSES removed in Django 2.0") def test_user_info_without_auth_middleware(django_elasticapm_client, client): - with override_settings( - MIDDLEWARE_CLASSES=[ - m for m in settings.MIDDLEWARE_CLASSES if m != "django.contrib.auth.middleware.AuthenticationMiddleware" - ] - ): - with pytest.raises(Exception): - client.get(reverse("elasticapm-raise-exc")) - assert len(django_elasticapm_client.events[ERROR]) == 1 - event = django_elasticapm_client.events[ERROR][0] - assert event["context"]["user"] == {} - - -@pytest.mark.skipif(django.VERSION < (1, 10), reason="MIDDLEWARE new in Django 1.10") -def test_user_info_without_auth_middleware_django_2(django_elasticapm_client, client): with override_settings( MIDDLEWARE_CLASSES=None, MIDDLEWARE=[m for m in settings.MIDDLEWARE if m != "django.contrib.auth.middleware.AuthenticationMiddleware"], @@ -614,8 +581,7 @@ def read(): assert_any_record_contains(caplog.records, "Can't capture request body: foobar") -@pytest.mark.skipif(django.VERSION < (1, 9), reason="get-raw-uri-not-available") -def 
test_disallowed_hosts_error_django_19(django_elasticapm_client): +def test_disallowed_hosts_error(django_elasticapm_client): request = WSGIRequest( environ={ "wsgi.input": io.BytesIO(), @@ -634,26 +600,6 @@ def test_disallowed_hosts_error_django_19(django_elasticapm_client): assert event["context"]["request"]["url"]["full"] == "http://testserver/" -@pytest.mark.skipif(django.VERSION >= (1, 9), reason="get-raw-uri-available") -def test_disallowed_hosts_error_django_18(django_elasticapm_client): - request = WSGIRequest( - environ={ - "wsgi.input": io.BytesIO(), - "wsgi.url_scheme": "http", - "REQUEST_METHOD": "POST", - "SERVER_NAME": "testserver", - "SERVER_PORT": "80", - "CONTENT_TYPE": "application/json", - "ACCEPT": "application/json", - } - ) - with override_settings(ALLOWED_HOSTS=["example.com"]): - # this should not raise a DisallowedHost exception - django_elasticapm_client.capture("Message", message="foo", request=request) - event = django_elasticapm_client.events[ERROR][0] - assert event["context"]["request"]["url"] == {"full": "DisallowedHost"} - - @pytest.mark.parametrize( "django_elasticapm_client", [{"capture_body": "errors"}, {"capture_body": "all"}, {"capture_body": "off"}], @@ -1196,16 +1142,6 @@ def test_stacktrace_filtered_for_elasticapm(client, django_elasticapm_client): assert spans[1]["stacktrace"][0]["module"].startswith("django.template"), spans[1]["stacktrace"][0]["function"] -@pytest.mark.skipif(django.VERSION > (1, 7), reason="argparse raises CommandError in this case") -@mock.patch("elasticapm.contrib.django.management.commands.elasticapm.Command._get_argv") -def test_subcommand_not_set(argv_mock): - stdout = io.StringIO() - argv_mock.return_value = ["manage.py", "elasticapm"] - call_command("elasticapm", stdout=stdout) - output = stdout.getvalue() - assert "No command specified" in output - - @mock.patch("elasticapm.contrib.django.management.commands.elasticapm.Command._get_argv") def test_subcommand_not_known(argv_mock): stdout = 
io.StringIO() @@ -1317,8 +1253,8 @@ def test_settings_server_url_with_credentials(): @pytest.mark.skipif( - not ((1, 10) <= django.VERSION < (2, 0)), - reason="only needed in 1.10 and 1.11 when both middleware settings are valid", + django.VERSION >= (2, 0), + reason="only needed in 1.11 when both middleware settings are valid", ) def test_django_1_10_uses_deprecated_MIDDLEWARE_CLASSES(): stdout = io.StringIO() @@ -1382,6 +1318,23 @@ def test_capture_post_errors_dict(client, django_elasticapm_client): assert error["context"]["request"]["body"] == "[REDACTED]" +@pytest.mark.parametrize( + "django_elasticapm_client", + [{"capture_body": "errors"}, {"capture_body": "transactions"}, {"capture_body": "all"}, {"capture_body": "off"}], + indirect=True, +) +def test_capture_django_orm_timeout_error(client, django_elasticapm_client): + with pytest.raises(DatabaseError): + client.get(reverse("elasticapm-django-orm-exc")) + + errors = django_elasticapm_client.events[ERROR] + if django_elasticapm_client.config.capture_body in (constants.ERROR, "all"): + stacktrace = errors[0]["exception"]["stacktrace"] + frames = [frame for frame in stacktrace if frame["function"] == "django_queryset_error"] + qs_var = frames[0]["vars"]["qs"] + assert qs_var == "" + + def test_capture_body_config_is_dynamic_for_errors(client, django_elasticapm_client): django_elasticapm_client.config.update(version="1", capture_body="all") with pytest.raises(MyException): diff --git a/tests/contrib/django/testapp/urls.py b/tests/contrib/django/testapp/urls.py index 857215280..92302e313 100644 --- a/tests/contrib/django/testapp/urls.py +++ b/tests/contrib/django/testapp/urls.py @@ -62,6 +62,7 @@ def handler500(request): re_path(r"^trigger-500-ioerror$", views.raise_ioerror, name="elasticapm-raise-ioerror"), re_path(r"^trigger-500-decorated$", views.decorated_raise_exc, name="elasticapm-raise-exc-decor"), re_path(r"^trigger-500-django$", views.django_exc, name="elasticapm-django-exc"), + 
re_path(r"^trigger-500-django-orm-exc$", views.django_queryset_error, name="elasticapm-django-orm-exc"), re_path(r"^trigger-500-template$", views.template_exc, name="elasticapm-template-exc"), re_path(r"^trigger-500-log-request$", views.logging_request_exc, name="elasticapm-log-request-exc"), re_path(r"^streaming$", views.streaming_view, name="elasticapm-streaming-view"), diff --git a/tests/contrib/django/testapp/views.py b/tests/contrib/django/testapp/views.py index 5a11b0961..906a8c2df 100644 --- a/tests/contrib/django/testapp/views.py +++ b/tests/contrib/django/testapp/views.py @@ -34,6 +34,8 @@ import time from django.contrib.auth.models import User +from django.db import DatabaseError +from django.db.models import QuerySet from django.http import HttpResponse, StreamingHttpResponse from django.shortcuts import get_object_or_404, render from django.views import View @@ -70,6 +72,20 @@ def django_exc(request): return get_object_or_404(MyException, pk=1) +def django_queryset_error(request): + """Simulation of django ORM timeout""" + + class CustomQuerySet(QuerySet): + def all(self): + raise DatabaseError() + + def __repr__(self) -> str: + return str(self._result_cache) + + qs = CustomQuerySet() + list(qs.all()) + + def raise_exc(request): raise MyException(request.GET.get("message", "view exception")) diff --git a/tests/contrib/opentelemetry/tests.py b/tests/contrib/opentelemetry/tests.py index 3a302289e..aa0160b74 100755 --- a/tests/contrib/opentelemetry/tests.py +++ b/tests/contrib/opentelemetry/tests.py @@ -34,6 +34,7 @@ from opentelemetry.trace import Link, SpanContext, SpanKind, TraceFlags from opentelemetry.trace.propagation import _SPAN_KEY +from opentelemetry.trace.status import Status, StatusCode import elasticapm.contrib.opentelemetry.context as context import elasticapm.contrib.opentelemetry.trace as trace @@ -155,4 +156,51 @@ def test_span_links(tracer: Tracer): assert span["links"][0]["span_id"] == "0011223344556677" +def 
test_set_status_with_status_object(tracer: Tracer): + with tracer.start_as_current_span("test") as span: + span.set_status(Status(StatusCode.OK)) + + client = tracer.client + transaction = client.events[constants.TRANSACTION][0] + assert transaction["outcome"] == "success" + + +def test_set_status_with_status_code(tracer: Tracer): + with tracer.start_as_current_span("test") as span: + span.set_status(StatusCode.ERROR) + + client = tracer.client + transaction = client.events[constants.TRANSACTION][0] + assert transaction["outcome"] == "failure" + + +def test_set_status_with_status_code_and_description(tracer: Tracer): + with tracer.start_as_current_span("test") as span: + span.set_status(StatusCode.OK, "Everything is fine") + + client = tracer.client + transaction = client.events[constants.TRANSACTION][0] + assert transaction["outcome"] == "success" + + +def test_set_status_unset(tracer: Tracer): + with tracer.start_as_current_span("test") as span: + span.set_status(StatusCode.UNSET) + + client = tracer.client + transaction = client.events[constants.TRANSACTION][0] + assert transaction["outcome"] == "unknown" + + +def test_set_status_on_span(tracer: Tracer): + """Test set_status on a child span (not transaction)""" + with tracer.start_as_current_span("test"): + with tracer.start_as_current_span("testspan") as span: + span.set_status(StatusCode.ERROR) + + client = tracer.client + span_event = client.events[constants.SPAN][0] + assert span_event["outcome"] == "failure" + + # TODO Add some span subtype testing? 
diff --git a/tests/contrib/sanic/fixtures.py b/tests/contrib/sanic/fixtures.py index e4d8d4158..e21bb00b9 100644 --- a/tests/contrib/sanic/fixtures.py +++ b/tests/contrib/sanic/fixtures.py @@ -155,6 +155,15 @@ async def raise_value_error(request): async def custom_headers(request): return json({"data": "message"}, headers={"sessionid": 1234555}) + @app.get("/add-cookies") + async def add_cookies(request): + response = json({"data": "message"}, headers={"sessionid": 1234555}) + if hasattr(response, "add_cookie"): + response.add_cookie("some", "cookie") + else: + response.cookies["some"] = "cookie" + return response + try: yield app, apm finally: diff --git a/tests/contrib/sanic/sanic_tests.py b/tests/contrib/sanic/sanic_tests.py index ec97fb02f..a59d508a1 100644 --- a/tests/contrib/sanic/sanic_tests.py +++ b/tests/contrib/sanic/sanic_tests.py @@ -194,6 +194,17 @@ def test_header_field_sanitization(sanic_elastic_app, elasticapm_client): assert transaction["context"]["request"]["headers"]["api_key"] == "[REDACTED]" +def test_cookies_normalization(sanic_elastic_app, elasticapm_client): + sanic_app, apm = next(sanic_elastic_app(elastic_client=elasticapm_client)) + _, resp = sanic_app.test_client.get( + "/add-cookies", + ) + assert resp.status_code == 200 + assert len(apm._client.events[constants.TRANSACTION]) == 1 + transaction = apm._client.events[constants.TRANSACTION][0] + assert transaction["context"]["response"]["cookies"] == {"some": {"value": "cookie", "path": "/"}} + + def test_custom_callback_handlers(sanic_elastic_app, elasticapm_client): def _custom_transaction_callback(request): return "my-custom-name" diff --git a/tests/contrib/serverless/aws_elb_test_data.json b/tests/contrib/serverless/aws_elb_test_data.json index 87e05ac85..79b4dc6dd 100644 --- a/tests/contrib/serverless/aws_elb_test_data.json +++ b/tests/contrib/serverless/aws_elb_test_data.json @@ -15,6 +15,7 @@ "connection": "Keep-Alive", "host": "blabla.com", "user-agent": "Apache-HttpClient/4.5.13 
(Java/11.0.15)", + "TraceParent": "00-12345678901234567890123456789012-1234567890123456-01", "x-amzn-trace-id": "Root=1-xxxxxxxxxxxxxx", "x-forwarded-for": "199.99.99.999", "x-forwarded-port": "443", diff --git a/tests/contrib/serverless/aws_sns_test_data.json b/tests/contrib/serverless/aws_sns_test_data.json index e6c7a89ef..a1900c54b 100644 --- a/tests/contrib/serverless/aws_sns_test_data.json +++ b/tests/contrib/serverless/aws_sns_test_data.json @@ -27,6 +27,10 @@ "City": { "Type": "String", "Value": "Any City" + }, + "traceparent": { + "Type": "String", + "Value": "00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-00" } } } diff --git a/tests/contrib/serverless/aws_tests.py b/tests/contrib/serverless/aws_tests.py index 9f4a7253f..f52636dcf 100644 --- a/tests/contrib/serverless/aws_tests.py +++ b/tests/contrib/serverless/aws_tests.py @@ -36,7 +36,12 @@ from elasticapm import capture_span from elasticapm.conf import constants -from elasticapm.contrib.serverless.aws import capture_serverless, get_data_from_request, get_data_from_response +from elasticapm.contrib.serverless.aws import ( + capture_serverless, + get_data_from_request, + get_data_from_response, + should_normalize_headers, +) @pytest.fixture @@ -300,6 +305,7 @@ def test_func(event, context): assert transaction["context"]["request"]["headers"] assert transaction["context"]["response"]["status_code"] == 200 assert transaction["context"]["service"]["origin"]["name"] == "lambda-279XGJDqGZ5rsrHC2Fjr" + assert transaction["trace_id"] == "12345678901234567890123456789012" def test_capture_serverless_s3(event_s3, context, elasticapm_client): @@ -338,6 +344,8 @@ def test_func(event, context): assert transaction["span_count"]["started"] == 1 assert transaction["context"]["message"]["headers"]["Population"] == "1250800" assert transaction["context"]["message"]["headers"]["City"] == "Any City" + assert len(transaction["links"]) == 1 + assert transaction["links"][0] == {"trace_id": 
"0af7651916cd43dd8448eb211c80319c", "span_id": "b7ad6b7169203331"} def test_capture_serverless_sqs(event_sqs, context, elasticapm_client): @@ -477,3 +485,17 @@ def test_func(event, context): test_func(event_api2, context) assert len(elasticapm_client.events[constants.TRANSACTION]) == 1 + + +def test_should_normalize_headers_true(event_api, event_elb): + assert should_normalize_headers(event_api) is True + assert should_normalize_headers(event_elb) is True + + +def test_should_normalize_headers_false(event_api2, event_lurl, event_s3, event_s3_batch, event_sqs, event_sns): + assert should_normalize_headers(event_api2) is False + assert should_normalize_headers(event_lurl) is False + assert should_normalize_headers(event_s3) is False + assert should_normalize_headers(event_s3_batch) is False + assert should_normalize_headers(event_sqs) is False + assert should_normalize_headers(event_sns) is False diff --git a/tests/contrib/serverless/azurefunctions/azure_functions_tests.py b/tests/contrib/serverless/azurefunctions/azure_functions_tests.py index 1db33758a..e2abbdcd3 100644 --- a/tests/contrib/serverless/azurefunctions/azure_functions_tests.py +++ b/tests/contrib/serverless/azurefunctions/azure_functions_tests.py @@ -29,7 +29,7 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import pytest -azure = pytest.importorskip("azure") +azure = pytest.importorskip("azure.functions") import datetime import os @@ -37,6 +37,7 @@ import azure.functions as func import mock +import elasticapm from elasticapm.conf import constants from elasticapm.contrib.serverless.azure import AzureFunctionsClient, ElasticAPMExtension, get_faas_data from tests.fixtures import TempStoreClient @@ -95,6 +96,7 @@ def test_extension_configure(): ElasticAPMExtension.configure(client_class=AzureFunctionsTestClient) client = ElasticAPMExtension.client assert client.config.metrics_interval == datetime.timedelta(0) + assert client.config.breakdown_metrics is False assert client.config.central_config is False assert client.config.cloud_provider == "none" assert client.config.framework_name == "Azure Functions" @@ -106,6 +108,27 @@ def test_extension_configure(): ElasticAPMExtension.client = None +def test_extension_configure_with_kwargs(): + try: + ElasticAPMExtension.configure( + client_class=AzureFunctionsTestClient, metrics_sets=["foo"], service_name="foo", environment="bar" + ) + client = ElasticAPMExtension.client + + assert client.config.metrics_interval == datetime.timedelta(0) + assert client.config.breakdown_metrics is False + assert client.config.central_config is False + assert client.config.cloud_provider == "none" + assert client.config.framework_name == "Azure Functions" + assert client.config.service_name == "foo" + assert client.config.environment == "bar" + assert client.config.metrics_sets == ["foo"] + finally: + if ElasticAPMExtension.client: + ElasticAPMExtension.client.close() + ElasticAPMExtension.client = None + + @pytest.mark.parametrize( "elasticapm_client", [{"client_class": AzureFunctionsTestClient}], indirect=["elasticapm_client"] ) @@ -122,8 +145,7 @@ def test_pre_post_invocation_app_level_request(elasticapm_client): body=b"", ) response = func.HttpResponse("", status_code=200, headers={}, mimetype="text/html") - context = 
mock.Mock(function_name="foo") - context.function_name = "foo_function" + context = mock.Mock(function_name="foo_function", invocation_id="fooid") ElasticAPMExtension.pre_invocation_app_level(None, context, {"request": request}) ElasticAPMExtension.post_invocation_app_level(None, context, func_ret=response) transaction = elasticapm_client.events[constants.TRANSACTION][0] diff --git a/tests/docker-compose.yml b/tests/docker-compose.yml index 48b43cda2..0629a045e 100644 --- a/tests/docker-compose.yml +++ b/tests/docker-compose.yml @@ -1,5 +1,3 @@ -version: '2.1' - services: postgres: user: postgres @@ -46,27 +44,21 @@ services: volumes: - pymongodata36:/data/db + mongodb42: + image: mongo:4.2 + ports: + - "27017:27017" + volumes: + - pymongodata42:/data/db + memcached: image: memcached redis: image: redis - elasticsearch6: - image: docker.elastic.co/elasticsearch/elasticsearch:6.8.0 - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:9200"] - environment: - - "ES_JAVA_OPTS=-Xms512m -Xmx512m" - - "network.host=" - - "transport.host=127.0.0.1" - - "http.host=0.0.0.0" - - "xpack.security.enabled=false" - volumes: - - pyesdata6:/usr/share/elasticsearch/data - elasticsearch7: - image: docker.elastic.co/elasticsearch/elasticsearch:7.17.0 + image: docker.elastic.co/elasticsearch/elasticsearch:7.17.29 healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9200"] environment: @@ -79,7 +71,7 @@ services: - pyesdata7:/usr/share/elasticsearch/data elasticsearch8: - image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0 + image: docker.elastic.co/elasticsearch/elasticsearch:8.19.6 healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9200"] ports: @@ -94,31 +86,21 @@ services: volumes: - pyesdata8:/usr/share/elasticsearch/data - elasticsearch5: - image: docker.elastic.co/elasticsearch/elasticsearch:5.6.16 - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:9200"] - environment: - - "ES_JAVA_OPTS=-Xms512m -Xmx512m" - - "network.host=" - - 
"transport.host=127.0.0.1" - - "http.host=0.0.0.0" - - "xpack.security.enabled=false" - volumes: - - pyesdata5:/usr/share/elasticsearch/data - - elasticsearch2: - image: elasticsearch:2 + elasticsearch9: + image: docker.elastic.co/elasticsearch/elasticsearch:9.2.0 healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9200"] + ports: + - "9200:9200" environment: - "ES_JAVA_OPTS=-Xms512m -Xmx512m" - - "network.host=" + - "network.host=_site_" - "transport.host=127.0.0.1" - "http.host=0.0.0.0" - "xpack.security.enabled=false" + - "action.destructive_requires_name=false" # allow for easy cleanup by calling DELETE * volumes: - - pyesdata2:/usr/share/elasticsearch/data + - pyesdata9:/usr/share/elasticsearch/data mssql: image: mcr.microsoft.com/mssql/server @@ -130,7 +112,7 @@ services: - pymssqldata:/var/opt/mssql mysql: - image: mysql + image: mysql:8.0 command: --default-authentication-plugin=mysql_native_password --log_error_verbosity=3 environment: - MYSQL_DATABASE=eapm_tests @@ -157,34 +139,30 @@ services: - "${LOCALSTACK_VOLUME_DIR:-./volume}:/var/lib/localstack" - "/var/run/docker.sock:/var/run/docker.sock" - zookeeper: - image: docker.io/bitnami/zookeeper:3.8 - ports: - - "2181:2181" - volumes: - - "zookeeper_data:/bitnami" - environment: - - ALLOW_ANONYMOUS_LOGIN=yes kafka: - image: docker.io/bitnami/kafka:3.1 + image: apache/kafka:4.1.0 ports: - "9092:9092" - volumes: - - "kafka_data:/bitnami" environment: - - KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181 - - ALLOW_PLAINTEXT_LISTENER=yes - depends_on: - - zookeeper + - "KAFKA_NODE_ID=1" + - "KAFKA_PROCESS_ROLES=broker,controller" + - "KAFKA_LISTENERS=PLAINTEXT://:9092,CONTROLLER://:9093" + - "KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://kafka:9092" + - "KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT" + - "KAFKA_CONTROLLER_QUORUM_VOTERS=1@localhost:9093" + - "KAFKA_CONTROLLER_LISTENER_NAMES=CONTROLLER" + - "KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1" + - 
"KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR=1" + - "KAFKA_TRANSACTION_STATE_LOG_MIN_ISR=1" + volumes: + - "kafka_data:/var/lib/kafka/data" run_tests: - image: elasticobservability/apm-agent-python-testing:${PYTHON_VERSION} + image: ${REGISTRY:-elasticobservability}/${IMAGE_NAME:-apm-agent-python-testing}:${PYTHON_VERSION} environment: + ES_9_URL: 'http://elasticsearch9:9200' ES_8_URL: 'http://elasticsearch8:9200' ES_7_URL: 'http://elasticsearch7:9200' - ES_6_URL: 'http://elasticsearch6:9200' - ES_5_URL: 'http://elasticsearch5:9200' - ES_2_URL: 'http://elasticsearch2:9200' volumes: @@ -198,15 +176,13 @@ volumes: driver: local pymongodata36: driver: local + pymongodata42: + driver: local pyesdata7: driver: local pyesdata8: driver: local - pyesdata6: - driver: local - pyesdata5: - driver: local - pyesdata2: + pyesdata9: driver: local pycassandradata3: driver: local @@ -214,8 +190,6 @@ volumes: driver: local mysqldata: driver: local - zookeeper_data: - driver: local kafka_data: driver: local localstack_data: diff --git a/tests/fixtures.py b/tests/fixtures.py index 94e89f961..25d21ee5d 100644 --- a/tests/fixtures.py +++ b/tests/fixtures.py @@ -28,7 +28,6 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-import codecs import gzip import io import itertools @@ -46,6 +45,7 @@ import zlib from collections import defaultdict from typing import Optional +from urllib.request import pathname2url import jsonschema import mock @@ -58,15 +58,10 @@ from elasticapm.conf.constants import SPAN from elasticapm.instrumentation import register from elasticapm.traces import execution_context +from elasticapm.transport.http import Transport from elasticapm.transport.http_base import HTTPTransportBase from elasticapm.utils.threading import ThreadManager -try: - from urllib.request import pathname2url -except ImportError: - # Python 2 - from urllib import pathname2url - cur_dir = os.path.dirname(os.path.realpath(__file__)) ERRORS_SCHEMA = os.path.join(cur_dir, "upstream", "json-specs", "error.json") @@ -79,11 +74,11 @@ SPAN_TYPES = json.load(f) -with codecs.open(ERRORS_SCHEMA, encoding="utf8") as errors_json, codecs.open( +with open(ERRORS_SCHEMA, encoding="utf8") as errors_json, open( TRANSACTIONS_SCHEMA, encoding="utf8" -) as transactions_json, codecs.open(SPAN_SCHEMA, encoding="utf8") as span_json, codecs.open( +) as transactions_json, open(SPAN_SCHEMA, encoding="utf8") as span_json, open( METRICSET_SCHEMA, encoding="utf8" -) as metricset_json, codecs.open( +) as metricset_json, open( METADATA_SCHEMA, encoding="utf8" ) as metadata_json: VALIDATORS = { @@ -401,6 +396,18 @@ def get_config(self, current_version=None, keys=None): return False, None, 30 +class MockSendHTTPTransport(Transport): + """Mocking the send method of the Transport class sometimes fails silently in client tests. 
+ After spending some time trying to understand this with no luck just use this class instead.""" + + def __init__(self, url, *args, **kwargs): + self.send_mock = mock.Mock() + super().__init__(url, *args, **kwargs) + + def send(self, data, forced_flush=False, custom_url=None, custom_headers=None): + return self.send_mock(data, forced_flush, custom_url, custom_headers) + + class TempStoreClient(Client): def __init__(self, config=None, **inline) -> None: inline.setdefault("transport_class", "tests.fixtures.DummyTransport") diff --git a/tests/handlers/logging/logging_tests.py b/tests/handlers/logging/logging_tests.py index 8e23a0b69..8cc8fc4f1 100644 --- a/tests/handlers/logging/logging_tests.py +++ b/tests/handlers/logging/logging_tests.py @@ -380,7 +380,7 @@ def test_logging_handler_no_client(recwarn): while True: # If we never find our desired warning this will eventually throw an # AssertionError - w = recwarn.pop(PendingDeprecationWarning) + w = recwarn.pop(DeprecationWarning) if "LoggingHandler requires a Client instance" in w.message.args[0]: return True diff --git a/tests/instrumentation/asyncio_tests/psycopg_tests.py b/tests/instrumentation/asyncio_tests/psycopg_tests.py new file mode 100644 index 000000000..7dbc31438 --- /dev/null +++ b/tests/instrumentation/asyncio_tests/psycopg_tests.py @@ -0,0 +1,128 @@ +# BSD 3-Clause License +# +# Copyright (c) 2025, Elasticsearch BV +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. 
+# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os + +import pytest +import pytest_asyncio + +from elasticapm.conf import constants + +psycopg = pytest.importorskip("psycopg") # isort:skip +pytestmark = [pytest.mark.psycopg, pytest.mark.asyncio] + +if "POSTGRES_DB" not in os.environ: + pytestmark.append(pytest.mark.skip("Skipping psycopg tests, no POSTGRES_DB environment variable set")) + + +def connect_kwargs(): + return { + "dbname": os.environ.get("POSTGRES_DB", "elasticapm_test"), + "user": os.environ.get("POSTGRES_USER", "postgres"), + "password": os.environ.get("POSTGRES_PASSWORD", "postgres"), + "host": os.environ.get("POSTGRES_HOST", None), + "port": os.environ.get("POSTGRES_PORT", None), + } + + +@pytest_asyncio.fixture(scope="function") +async def postgres_connection(request): + conn = await psycopg.AsyncConnection.connect(**connect_kwargs()) + cursor = conn.cursor() + await cursor.execute( + "CREATE TABLE test(id int, name VARCHAR(5) NOT NULL);" + "INSERT INTO test VALUES (1, 'one'), (2, 'two'), (3, 'three');" + ) + + 
yield conn + + # cleanup + await cursor.execute("ROLLBACK") + + +async def test_cursor_execute_signature(instrument, postgres_connection, elasticapm_client): + cursor = postgres_connection.cursor() + record = await cursor.execute(query="SELECT 1", params=None, prepare=None, binary=None) + assert record + + +async def test_cursor_executemany_signature(instrument, postgres_connection, elasticapm_client): + cursor = postgres_connection.cursor() + res = await cursor.executemany( + query="INSERT INTO test VALUES (%s, %s)", + params_seq=((4, "four"),), + returning=False, + ) + assert res is None + + +async def test_execute_with_sleep(instrument, postgres_connection, elasticapm_client): + elasticapm_client.begin_transaction("test") + cursor = postgres_connection.cursor() + await cursor.execute("SELECT pg_sleep(0.1);") + elasticapm_client.end_transaction("test", "OK") + + transaction = elasticapm_client.events[constants.TRANSACTION][0] + spans = elasticapm_client.spans_for_transaction(transaction) + + assert len(spans) == 1 + span = spans[0] + assert 100 < span["duration"] < 110 + assert transaction["id"] == span["transaction_id"] + assert span["type"] == "db" + assert span["subtype"] == "postgresql" + assert span["action"] == "query" + assert span["sync"] == False + assert span["name"] == "SELECT FROM" + + +async def test_executemany(instrument, postgres_connection, elasticapm_client): + elasticapm_client.begin_transaction("test") + cursor = postgres_connection.cursor() + await cursor.executemany("INSERT INTO test VALUES (%s, %s);", [(1, "uno"), (2, "due")]) + elasticapm_client.end_transaction("test", "OK") + + transaction = elasticapm_client.events[constants.TRANSACTION][0] + spans = elasticapm_client.spans_for_transaction(transaction) + + assert len(spans) == 1 + span = spans[0] + assert transaction["id"] == span["transaction_id"] + assert span["subtype"] == "postgresql" + assert span["action"] == "query" + assert span["sync"] == False + assert span["name"] == "INSERT 
INTO test" + + +async def test_server_cursor_execute(instrument, postgres_connection, elasticapm_client): + cursor = postgres_connection.cursor(name="async_server_cursor") + assert isinstance(cursor, psycopg.AsyncServerCursor) + record = await cursor.execute(query="SELECT 1", params=None, binary=None) + assert record diff --git a/tests/instrumentation/azure_tests.py b/tests/instrumentation/azure_tests.py index aeaab03c0..5662bedfc 100644 --- a/tests/instrumentation/azure_tests.py +++ b/tests/instrumentation/azure_tests.py @@ -39,10 +39,13 @@ azureblob = pytest.importorskip("azure.storage.blob") azurequeue = pytest.importorskip("azure.storage.queue") azuretable = pytest.importorskip("azure.cosmosdb.table") +azuredatatable = pytest.importorskip("azure.data.tables") azurefile = pytest.importorskip("azure.storage.fileshare") pytestmark = [pytest.mark.azurestorage] + from azure.cosmosdb.table.tableservice import TableService +from azure.data.tables import TableServiceClient as DataTableServiceClient from azure.storage.blob import BlobServiceClient from azure.storage.fileshare import ShareClient from azure.storage.queue import QueueClient @@ -82,6 +85,19 @@ def queue_client(): queue_client.delete_queue() +@pytest.fixture() +def data_table_service(): + table_name = "apmagentpythonci" + str(uuid.uuid4().hex) + data_table_service_client = DataTableServiceClient.from_connection_string(conn_str=CONNECTION_STRING) + data_table_service = data_table_service_client.get_table_client(table_name) + data_table_service.create_table() + data_table_service.table_name = table_name + + yield data_table_service + + data_table_service.delete_table() + + @pytest.fixture() def table_service(): table_name = "apmagentpythonci" + str(uuid.uuid4().hex) @@ -182,6 +198,24 @@ def test_queue(instrument, elasticapm_client, queue_client): assert span["action"] == "delete" +def test_data_table_create(instrument, elasticapm_client): + table_name = "apmagentpythonci" + str(uuid.uuid4().hex) + 
data_table_service_client = DataTableServiceClient.from_connection_string(conn_str=CONNECTION_STRING) + data_table_service = data_table_service_client.get_table_client(table_name) + + elasticapm_client.begin_transaction("transaction.test") + data_table_service.create_table() + data_table_service.delete_table() + elasticapm_client.end_transaction("MyView") + + span = elasticapm_client.events[constants.SPAN][0] + + assert span["name"] == "AzureTable Create {}".format(table_name) + assert span["type"] == "storage" + assert span["subtype"] == "azuretable" + assert span["action"] == "Create" + + def test_table_create(instrument, elasticapm_client): table_name = "apmagentpythonci" + str(uuid.uuid4().hex) table_service = TableService(connection_string=CONNECTION_STRING) @@ -199,6 +233,42 @@ def test_table_create(instrument, elasticapm_client): assert span["action"] == "Create" +def test_data_table(instrument, elasticapm_client, data_table_service): + table_name = data_table_service.table_name + elasticapm_client.begin_transaction("transaction.test") + task = {"PartitionKey": "tasksSeattle", "RowKey": "001", "description": "Take out the trash", "priority": 200} + data_table_service.create_entity(task) + task = {"PartitionKey": "tasksSeattle", "RowKey": "001", "description": "Take out the garbage", "priority": 250} + data_table_service.update_entity(task) + task = data_table_service.get_entity("tasksSeattle", "001") + data_table_service.delete_entity("tasksSeattle", "001") + elasticapm_client.end_transaction("MyView") + + span = elasticapm_client.events[constants.SPAN][0] + assert span["name"] == "AzureTable Insert {}".format(table_name) + assert span["type"] == "storage" + assert span["subtype"] == "azuretable" + assert span["action"] == "Insert" + + span = elasticapm_client.events[constants.SPAN][1] + assert span["name"] == "AzureTable Update {}(PartitionKey='tasksSeattle',RowKey='001')".format(table_name) + assert span["type"] == "storage" + assert span["subtype"] == 
"azuretable" + assert span["action"] == "Update" + + span = elasticapm_client.events[constants.SPAN][2] + assert span["name"] == "AzureTable Query {}(PartitionKey='tasksSeattle',RowKey='001')".format(table_name) + assert span["type"] == "storage" + assert span["subtype"] == "azuretable" + assert span["action"] == "Query" + + span = elasticapm_client.events[constants.SPAN][3] + assert span["name"] == "AzureTable Delete {}(PartitionKey='tasksSeattle',RowKey='001')".format(table_name) + assert span["type"] == "storage" + assert span["subtype"] == "azuretable" + assert span["action"] == "Delete" + + def test_table(instrument, elasticapm_client, table_service): table_name = table_service.table_name elasticapm_client.begin_transaction("transaction.test") diff --git a/tests/instrumentation/dbapi2_tests.py b/tests/instrumentation/dbapi2_tests.py index 7519b0801..089571715 100644 --- a/tests/instrumentation/dbapi2_tests.py +++ b/tests/instrumentation/dbapi2_tests.py @@ -29,7 +29,13 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import pytest -from elasticapm.instrumentation.packages.dbapi2 import Literal, extract_signature, scan, tokenize +from elasticapm.instrumentation.packages.dbapi2 import ( + Literal, + extract_action_from_signature, + extract_signature, + scan, + tokenize, +) def test_scan_simple(): @@ -114,3 +120,66 @@ def test_extract_signature_bytes(): actual = extract_signature(sql) expected = "HELLO" assert actual == expected + + +def test_extract_signature_pathological(): + # tune for performance testing + multiplier = 10 + values = [] + for chunk in range(multiplier): + i = chunk * 3 + values.append(f" (${1+i}::varchar, ${2+i}::varchar, ${3+i}::varchar), ") + + sql = f"SELECT * FROM (VALUES {''.join(values)})\n" + actual = extract_signature(sql) + expected = "SELECT FROM" + assert actual == expected + + +@pytest.mark.parametrize( + ["sql", "expected"], + [ + ( + "EXEC AdventureWorks2022.dbo.uspGetEmployeeManagers 50;", + "EXECUTE AdventureWorks2022.dbo.uspGetEmployeeManagers", + ), + ("EXECUTE sp_who2", "EXECUTE sp_who2"), + ("EXEC sp_updatestats @@all_schemas = 'true'", "EXECUTE sp_updatestats"), + ("CALL get_car_stats_by_year(2017, @number, @min, @avg, @max);", "CALL get_car_stats_by_year()"), + ("CALL get_car_stats_by_year", "CALL get_car_stats_by_year()"), + ("CALL get_car_stats_by_year;", "CALL get_car_stats_by_year()"), + ("CALL get_car_stats_by_year();", "CALL get_car_stats_by_year()"), + ], +) +def test_extract_signature_for_procedure_call(sql, expected): + actual = extract_signature(sql) + assert actual == expected + + +@pytest.mark.parametrize( + ["sql", "expected"], + [ + ("SELECT FROM table", "query"), + ("EXEC sp_who", "exec"), + ("EXECUTE sp_updatestats", "exec"), + ("CALL me_maybe", "exec"), + ], +) +def test_extract_action_from_signature(sql, expected): + actual = extract_action_from_signature(sql, "query") + assert actual == expected + + +@pytest.mark.parametrize( + ["sql", "expected"], + [ + ("SELECT username FROM user", "SELECT FROM user"), + ("SELECT 
username FROM [user]", "SELECT FROM [user]"), + ("SELECT username FROM [db].[user]", "SELECT FROM [db].[user]"), + ("SELECT username FROM db.[user]", "SELECT FROM db.[user]"), + ("SELECT username FROM [db].user", "SELECT FROM [db].user"), + ], +) +def test_extract_signature_when_using_square_brackets(sql, expected): + actual = extract_signature(sql) + assert actual == expected diff --git a/tests/instrumentation/kafka_tests.py b/tests/instrumentation/kafka_tests.py index 71416c130..54be2ee8e 100644 --- a/tests/instrumentation/kafka_tests.py +++ b/tests/instrumentation/kafka_tests.py @@ -45,19 +45,27 @@ pytestmark = [pytest.mark.kafka] -if "KAFKA_HOST" not in os.environ: +KAFKA_HOST = os.environ.get("KAFKA_HOST") +if not KAFKA_HOST: pytestmark.append(pytest.mark.skip("Skipping kafka tests, no KAFKA_HOST environment variable set")) -KAFKA_HOST = os.environ["KAFKA_HOST"] - @pytest.fixture(scope="function") def topics(): topics = ["test", "foo", "bar"] admin_client = KafkaAdminClient(bootstrap_servers=[f"{KAFKA_HOST}:9092"]) - admin_client.create_topics([NewTopic(name, num_partitions=1, replication_factor=1) for name in topics]) + # since kafka-python 2.1.0 we started to get failures in create_topics because topics were already there despite + # calls to delete_topics. 
In the meantime we found a proper fix use a big hammer and catch topics handling failures + # https://github.com/dpkp/kafka-python/issues/2557 + try: + admin_client.create_topics([NewTopic(name, num_partitions=1, replication_factor=1) for name in topics]) + except Exception: + pass yield topics - admin_client.delete_topics(topics) + try: + admin_client.delete_topics(topics) + except Exception: + pass @pytest.fixture() @@ -233,3 +241,22 @@ def test_kafka_poll_unsampled_transaction(instrument, elasticapm_client, consume elasticapm_client.end_transaction("foo") spans = elasticapm_client.events[SPAN] assert len(spans) == 0 + + +def test_kafka_consumer_unsampled_transaction_handles_stop_iteration( + instrument, elasticapm_client, producer, consumer, topics +): + def delayed_send(): + time.sleep(0.2) + producer.send("test", key=b"foo", value=b"bar") + + thread = threading.Thread(target=delayed_send) + thread.start() + transaction = elasticapm_client.begin_transaction("foo") + transaction.is_sampled = False + for item in consumer: + pass + thread.join() + elasticapm_client.end_transaction("foo") + spans = elasticapm_client.events[SPAN] + assert len(spans) == 0 diff --git a/tests/instrumentation/mysql_connector_tests.py b/tests/instrumentation/mysql_connector_tests.py index 89e2df812..3b6cb47bd 100644 --- a/tests/instrumentation/mysql_connector_tests.py +++ b/tests/instrumentation/mysql_connector_tests.py @@ -63,9 +63,6 @@ def mysql_connector_connection(request): cursor.execute("DROP TABLE `test`") -@pytest.mark.skipif( - sys.version_info >= (3, 12), reason="Perhaps related to changes in weakref in py3.12?" 
-) # TODO py3.12 @pytest.mark.integrationtest def test_mysql_connector_select(instrument, mysql_connector_connection, elasticapm_client): cursor = mysql_connector_connection.cursor() diff --git a/tests/instrumentation/psycopg2_tests.py b/tests/instrumentation/psycopg2_tests.py index 70c0d6329..1cee5b269 100644 --- a/tests/instrumentation/psycopg2_tests.py +++ b/tests/instrumentation/psycopg2_tests.py @@ -266,6 +266,27 @@ def test_fully_qualified_table_name(): assert "SELECT FROM db.schema.mytable" == actual +@pytest.mark.integrationtest +@pytest.mark.skipif(not has_postgres_configured, reason="PostgresSQL not configured") +def test_cursor_execute_signature(instrument, postgres_connection, elasticapm_client): + cursor = postgres_connection.cursor() + cursor.execute(query="SELECT 1", vars=None) + row = cursor.fetchone() + + assert row + + +@pytest.mark.integrationtest +@pytest.mark.skipif(not has_postgres_configured, reason="PostgresSQL not configured") +def test_cursor_executemany_signature(instrument, postgres_connection, elasticapm_client): + cursor = postgres_connection.cursor() + res = cursor.executemany( + query="INSERT INTO test VALUES (%s, %s)", + vars_list=((4, "four"),), + ) + assert res is None + + @pytest.mark.integrationtest @pytest.mark.skipif(not has_postgres_configured, reason="PostgresSQL not configured") def test_destination(instrument, postgres_connection, elasticapm_client): diff --git a/tests/instrumentation/psycopg_tests.py b/tests/instrumentation/psycopg_tests.py index 38768bcef..f53565b34 100644 --- a/tests/instrumentation/psycopg_tests.py +++ b/tests/instrumentation/psycopg_tests.py @@ -47,6 +47,8 @@ has_postgres_configured = "POSTGRES_DB" in os.environ +PSYCOPG_VERSION = tuple([int(x) for x in psycopg.version.__version__.split() if x.isdigit()]) + def connect_kwargs(): return { @@ -73,6 +75,31 @@ def postgres_connection(request): cursor.execute("ROLLBACK") +@pytest.mark.integrationtest +@pytest.mark.skipif(not has_postgres_configured, 
reason="PostgresSQL not configured") +def test_cursor_execute_signature(instrument, postgres_connection, elasticapm_client): + cursor = postgres_connection.cursor() + cursor.execute(query="SELECT 1", params=None, prepare=None, binary=None) + row = cursor.fetchone() + assert row + + +@pytest.mark.integrationtest +@pytest.mark.skipif(not has_postgres_configured, reason="PostgresSQL not configured") +def test_cursor_executemany_signature(instrument, postgres_connection, elasticapm_client): + cursor = postgres_connection.cursor() + if PSYCOPG_VERSION < (3, 1, 0): + kwargs = {} + else: + kwargs = {"returning": False} + res = cursor.executemany( + query="INSERT INTO test VALUES (%s, %s)", + params_seq=((4, "four"),), + **kwargs, + ) + assert res is None + + @pytest.mark.integrationtest @pytest.mark.skipif(not has_postgres_configured, reason="PostgresSQL not configured") def test_destination(instrument, postgres_connection, elasticapm_client): @@ -252,3 +279,12 @@ def test_psycopg_connection(instrument, elasticapm_transaction, postgres_connect host = os.environ.get("POSTGRES_HOST", "localhost") assert span["name"] == f"psycopg.connect {host}:5432" assert span["action"] == "connect" + + +@pytest.mark.integrationtest +@pytest.mark.skipif(not has_postgres_configured, reason="PostgresSQL not configured") +def test_server_cursor_execute(instrument, postgres_connection, elasticapm_client): + cursor = postgres_connection.cursor(name="server_cursor") + assert isinstance(cursor, psycopg.ServerCursor) + record = cursor.execute(query="SELECT 1", params=None, binary=True) + assert record diff --git a/tests/instrumentation/urllib3_tests.py b/tests/instrumentation/urllib3_tests.py index 8cc21ceb0..1fa03fa43 100644 --- a/tests/instrumentation/urllib3_tests.py +++ b/tests/instrumentation/urllib3_tests.py @@ -294,3 +294,20 @@ def test_instance_headers_are_respected( assert "kwargs" in request_headers if instance_headers and not (header_arg or header_kwarg): assert "instance" in 
request_headers + + +def test_connection_pool_urlopen_does_not_crash_with_many_args(instrument, elasticapm_client, waiting_httpserver): + """Mimics ConnectionPool.urlopen error path with broken connection, see #1928""" + waiting_httpserver.serve_content("") + url = waiting_httpserver.url + "/hello_world" + parsed_url = urllib.parse.urlparse(url) + pool = urllib3.HTTPConnectionPool( + parsed_url.hostname, + parsed_url.port, + maxsize=1, + block=True, + ) + retry = urllib3.util.Retry(10) + elasticapm_client.begin_transaction("transaction") + r = pool.urlopen("GET", url, None, {"args": "true"}, retry, False, False) + assert r.status == 200 diff --git a/tests/instrumentation/urllib_tests.py b/tests/instrumentation/urllib_tests.py index 3f2796483..fbf5fa44f 100644 --- a/tests/instrumentation/urllib_tests.py +++ b/tests/instrumentation/urllib_tests.py @@ -114,7 +114,7 @@ def test_urllib_error(instrument, elasticapm_client, waiting_httpserver, status_ @mock.patch(request_method) @mock.patch(getresponse_method) def test_urllib_standard_port(mock_getresponse, mock_request, instrument, elasticapm_client): - # "code" is needed for Python 3, "status" for Python 2 + # Python internally used both "code" and "status" mock_getresponse.return_value = mock.Mock(code=200, status=200) url = "http://example.com/" diff --git a/tests/requirements/reqs-asgi-2.txt b/tests/requirements/reqs-asgi-2.txt index eecc89d9a..ca2f94b02 100644 --- a/tests/requirements/reqs-asgi-2.txt +++ b/tests/requirements/reqs-asgi-2.txt @@ -1,6 +1,6 @@ quart==0.6.13 MarkupSafe<2.1 -jinja2==3.0.3 +jinja2==3.1.5 async-asgi-testclient asgiref -r reqs-base.txt diff --git a/tests/requirements/reqs-asyncpg-0.28.txt b/tests/requirements/reqs-asyncpg-0.28.txt new file mode 100644 index 000000000..2dd69e3a5 --- /dev/null +++ b/tests/requirements/reqs-asyncpg-0.28.txt @@ -0,0 +1,2 @@ +asyncpg==0.28 +-r reqs-base.txt diff --git a/tests/requirements/reqs-azure-newest.txt b/tests/requirements/reqs-azure-newest.txt new file 
mode 100644 index 000000000..d2bf7d1f0 --- /dev/null +++ b/tests/requirements/reqs-azure-newest.txt @@ -0,0 +1,6 @@ +azure-storage-blob +azure-storage-queue +azure-data-tables +azure-storage-file-share +azure-cosmosdb-table +-r reqs-base.txt diff --git a/tests/requirements/reqs-azurefunctions-newest.txt b/tests/requirements/reqs-azurefunctions-newest.txt new file mode 100644 index 000000000..76dc50b48 --- /dev/null +++ b/tests/requirements/reqs-azurefunctions-newest.txt @@ -0,0 +1,2 @@ +azure-functions +-r reqs-base.txt diff --git a/tests/requirements/reqs-base.txt b/tests/requirements/reqs-base.txt index 42bac1bb8..d1105586a 100644 --- a/tests/requirements/reqs-base.txt +++ b/tests/requirements/reqs-base.txt @@ -8,8 +8,8 @@ coverage[toml]==6.3 ; python_version == '3.7' coverage==7.3.1 ; python_version > '3.7' pytest-cov==4.0.0 ; python_version < '3.8' pytest-cov==4.1.0 ; python_version > '3.7' -jinja2==3.1.2 ; python_version == '3.7' -pytest-localserver==0.5.0 +jinja2==3.1.5 ; python_version == '3.7' +pytest-localserver==0.9.0 pytest-mock==3.6.1 ; python_version == '3.6' pytest-mock==3.10.0 ; python_version > '3.6' pytest-benchmark==3.4.1 ; python_version == '3.6' @@ -29,7 +29,8 @@ mock pytz ecs_logging structlog -wrapt>=1.14.1,<1.15.0 +wrapt>=1.14.1,!=1.15.0 +simplejson pytest-asyncio==0.21.0 ; python_version >= '3.7' asynctest==0.13.0 ; python_version >= '3.7' diff --git a/tests/requirements/reqs-celery-4-django-1.11.txt b/tests/requirements/reqs-celery-4-django-1.11.txt deleted file mode 100644 index 4440bb70f..000000000 --- a/tests/requirements/reqs-celery-4-django-1.11.txt +++ /dev/null @@ -1,2 +0,0 @@ --r reqs-celery-4.txt --r reqs-django-1.11.txt diff --git a/tests/requirements/reqs-celery-4-django-2.0.txt b/tests/requirements/reqs-celery-4-django-2.0.txt deleted file mode 100644 index 72e805f38..000000000 --- a/tests/requirements/reqs-celery-4-django-2.0.txt +++ /dev/null @@ -1,2 +0,0 @@ --r reqs-celery-4.txt --r reqs-django-2.0.txt diff --git 
a/tests/requirements/reqs-celery-4-flask-1.0.txt b/tests/requirements/reqs-celery-4-flask-1.0.txt deleted file mode 100644 index e357a036f..000000000 --- a/tests/requirements/reqs-celery-4-flask-1.0.txt +++ /dev/null @@ -1,2 +0,0 @@ --r reqs-celery-4.txt --r reqs-flask-1.0.txt diff --git a/tests/requirements/reqs-celery-4.txt b/tests/requirements/reqs-celery-4.txt deleted file mode 100644 index 57ba4c638..000000000 --- a/tests/requirements/reqs-celery-4.txt +++ /dev/null @@ -1,4 +0,0 @@ -celery>4.0,<5 -# including future as it was missing in celery 4.4.4, see https://github.com/celery/celery/issues/6145 -future>=0.18.0 -importlib-metadata<5.0; python_version<"3.8" diff --git a/tests/requirements/reqs-celery-5-django-5.txt b/tests/requirements/reqs-celery-5-django-5.txt new file mode 100644 index 000000000..b528dcb85 --- /dev/null +++ b/tests/requirements/reqs-celery-5-django-5.txt @@ -0,0 +1,2 @@ +-r reqs-celery-5.txt +-r reqs-django-5.0.txt diff --git a/tests/requirements/reqs-django-4.2.txt b/tests/requirements/reqs-django-4.2.txt new file mode 100644 index 000000000..6818ea895 --- /dev/null +++ b/tests/requirements/reqs-django-4.2.txt @@ -0,0 +1,3 @@ +Django>=4.2,<5.0 +jinja2<4 +-r reqs-base.txt diff --git a/tests/requirements/reqs-django-5.0.txt b/tests/requirements/reqs-django-5.0.txt new file mode 100644 index 000000000..dd2e1cea6 --- /dev/null +++ b/tests/requirements/reqs-django-5.0.txt @@ -0,0 +1,3 @@ +Django>=5.0,<5.1 +jinja2<4 +-r reqs-base.txt diff --git a/tests/requirements/reqs-elasticsearch-2.txt b/tests/requirements/reqs-elasticsearch-2.txt deleted file mode 100644 index e3f92c1db..000000000 --- a/tests/requirements/reqs-elasticsearch-2.txt +++ /dev/null @@ -1,2 +0,0 @@ -elasticsearch>=2.0,<3.0 --r reqs-base.txt diff --git a/tests/requirements/reqs-elasticsearch-5.txt b/tests/requirements/reqs-elasticsearch-5.txt deleted file mode 100644 index 27a0e10c0..000000000 --- a/tests/requirements/reqs-elasticsearch-5.txt +++ /dev/null @@ -1,2 +0,0 @@ 
-elasticsearch>=5.0,<6.0 --r reqs-base.txt diff --git a/tests/requirements/reqs-elasticsearch-6.txt b/tests/requirements/reqs-elasticsearch-6.txt deleted file mode 100644 index ad34285bc..000000000 --- a/tests/requirements/reqs-elasticsearch-6.txt +++ /dev/null @@ -1,2 +0,0 @@ -elasticsearch>=6.0,<7.0 --r reqs-base.txt diff --git a/tests/requirements/reqs-elasticsearch-7.txt b/tests/requirements/reqs-elasticsearch-7.txt index 5fa4e0a25..c1ee19a1a 100644 --- a/tests/requirements/reqs-elasticsearch-7.txt +++ b/tests/requirements/reqs-elasticsearch-7.txt @@ -1,3 +1,3 @@ elasticsearch>=7.0,<8.0 -aiohttp ; python_version >= '3.6' +aiohttp -r reqs-base.txt diff --git a/tests/requirements/reqs-elasticsearch-8.txt b/tests/requirements/reqs-elasticsearch-8.txt index c2b0c8d8c..2cbb658f1 100644 --- a/tests/requirements/reqs-elasticsearch-8.txt +++ b/tests/requirements/reqs-elasticsearch-8.txt @@ -1,3 +1,3 @@ elasticsearch>=8.0,<9.0 -aiohttp ; python_version >= '3.6' +aiohttp -r reqs-base.txt diff --git a/tests/requirements/reqs-elasticsearch-9.txt b/tests/requirements/reqs-elasticsearch-9.txt new file mode 100644 index 000000000..b310cbb47 --- /dev/null +++ b/tests/requirements/reqs-elasticsearch-9.txt @@ -0,0 +1,3 @@ +elasticsearch>=9.0,<10.0 +aiohttp +-r reqs-base.txt diff --git a/tests/requirements/reqs-flask-1.1.txt b/tests/requirements/reqs-flask-1.1.txt index 107d375d5..cd32a4696 100644 --- a/tests/requirements/reqs-flask-1.1.txt +++ b/tests/requirements/reqs-flask-1.1.txt @@ -1,4 +1,4 @@ -jinja2<3.1.0 +jinja2<3.2.0 Werkzeug<2.1.0 Flask>=1.1,<1.2 MarkupSafe<2.1 diff --git a/tests/requirements/reqs-flask-2.0.txt b/tests/requirements/reqs-flask-2.0.txt index d68be1afa..dc3cb6572 100644 --- a/tests/requirements/reqs-flask-2.0.txt +++ b/tests/requirements/reqs-flask-2.0.txt @@ -1,4 +1,5 @@ -Flask>=2.0,<3 +Flask>=2.0,<2.1 +Werkzeug<3 blinker>=1.1 itsdangerous -r reqs-base.txt diff --git a/tests/requirements/reqs-flask-2.1.txt b/tests/requirements/reqs-flask-2.1.txt new file 
mode 100644 index 000000000..6acf72eb6 --- /dev/null +++ b/tests/requirements/reqs-flask-2.1.txt @@ -0,0 +1,5 @@ +Flask>=2.1,<2.2 +Werkzeug<3 +blinker>=1.1 +itsdangerous +-r reqs-base.txt diff --git a/tests/requirements/reqs-flask-2.2.txt b/tests/requirements/reqs-flask-2.2.txt new file mode 100644 index 000000000..e6307fded --- /dev/null +++ b/tests/requirements/reqs-flask-2.2.txt @@ -0,0 +1,5 @@ +Flask>=2.2,<2.3 +Werkzeug<3 +blinker>=1.1 +itsdangerous +-r reqs-base.txt diff --git a/tests/requirements/reqs-flask-2.3.txt b/tests/requirements/reqs-flask-2.3.txt new file mode 100644 index 000000000..08434994e --- /dev/null +++ b/tests/requirements/reqs-flask-2.3.txt @@ -0,0 +1,4 @@ +Flask>=2.3,<3 +blinker>=1.1 +itsdangerous +-r reqs-base.txt diff --git a/tests/requirements/reqs-flask-3.0.txt b/tests/requirements/reqs-flask-3.0.txt new file mode 100644 index 000000000..92120aa14 --- /dev/null +++ b/tests/requirements/reqs-flask-3.0.txt @@ -0,0 +1,3 @@ +Flask>=3.0,<3.1 +itsdangerous +-r reqs-base.txt diff --git a/tests/requirements/reqs-pymongo-3.6.txt b/tests/requirements/reqs-pymongo-3.6.txt new file mode 100644 index 000000000..763bd98bc --- /dev/null +++ b/tests/requirements/reqs-pymongo-3.6.txt @@ -0,0 +1,2 @@ +pymongo>=3.6,<4.0 +-r reqs-base.txt diff --git a/tests/requirements/reqs-pymongo-newest.txt b/tests/requirements/reqs-pymongo-newest.txt index 330140ad6..7e5174d70 100644 --- a/tests/requirements/reqs-pymongo-newest.txt +++ b/tests/requirements/reqs-pymongo-newest.txt @@ -1,2 +1,2 @@ -pymongo>=3.6 +pymongo>=4.0 -r reqs-base.txt diff --git a/tests/requirements/reqs-pymssql-newest.txt b/tests/requirements/reqs-pymssql-newest.txt index 9a4c379dc..3b3553ac6 100644 --- a/tests/requirements/reqs-pymssql-newest.txt +++ b/tests/requirements/reqs-pymssql-newest.txt @@ -1,3 +1,4 @@ -cython ; python_version >= '3.6' -pymssql +cython +pymssql ; python_version >= '3.9' +pymssql==2.3.1 ; python_version < '3.9' -r reqs-base.txt diff --git 
a/tests/requirements/reqs-sanic-newest.txt b/tests/requirements/reqs-sanic-newest.txt index c30ea5e2b..7085f99e0 100644 --- a/tests/requirements/reqs-sanic-newest.txt +++ b/tests/requirements/reqs-sanic-newest.txt @@ -1,3 +1,4 @@ sanic sanic-testing +tracerite==1.1.1 ; python_version < '3.8' -r reqs-base.txt diff --git a/tests/requirements/reqs-starlette-0.13.txt b/tests/requirements/reqs-starlette-0.13.txt index 3144bf464..ef7617c74 100644 --- a/tests/requirements/reqs-starlette-0.13.txt +++ b/tests/requirements/reqs-starlette-0.13.txt @@ -1,4 +1,5 @@ starlette>=0.13,<0.14 aiofiles==0.7.0 -requests==2.31.0 +requests==2.32.4; python_version >= '3.8' +requests==2.31.0; python_version < '3.8' -r reqs-base.txt diff --git a/tests/requirements/reqs-starlette-0.14.txt b/tests/requirements/reqs-starlette-0.14.txt index e1952d09b..263a67636 100644 --- a/tests/requirements/reqs-starlette-0.14.txt +++ b/tests/requirements/reqs-starlette-0.14.txt @@ -1,4 +1,5 @@ starlette>=0.14,<0.15 -requests==2.31.0 +requests==2.32.4; python_version >= '3.8' +requests==2.31.0; python_version < '3.8' aiofiles -r reqs-base.txt diff --git a/tests/scripts/docker/run_tests.sh b/tests/scripts/docker/run_tests.sh index 9a89f93be..9de251f02 100755 --- a/tests/scripts/docker/run_tests.sh +++ b/tests/scripts/docker/run_tests.sh @@ -2,7 +2,7 @@ set -ex function cleanup { - PYTHON_VERSION=${1} docker-compose down -v + PYTHON_VERSION=${1} REGISTRY=${REGISTRY} IMAGE_NAME=${IMAGE_NAME} docker compose down -v if [[ $CODECOV_TOKEN ]]; then cd .. 
@@ -21,6 +21,8 @@ docker_pip_cache="/tmp/cache/pip" TEST="${1}/${2}" LOCAL_USER_ID=${LOCAL_USER_ID:=$(id -u)} LOCAL_GROUP_ID=${LOCAL_GROUP_ID:=$(id -g)} +IMAGE_NAME=${IMAGE_NAME:-"apm-agent-python-testing"} +REGISTRY=${REGISTRY:-"elasticobservability"} cd tests @@ -38,26 +40,27 @@ else fi fi -echo "Running tests for ${1}/${2}" +echo "Running tests for ${TEST}" if [[ -n $DOCKER_DEPS ]] then - PYTHON_VERSION=${1} docker-compose up -d ${DOCKER_DEPS} + PYTHON_VERSION=${1} REGISTRY=${REGISTRY} IMAGE_NAME=${IMAGE_NAME} docker compose up --quiet-pull -d ${DOCKER_DEPS} fi # CASS_DRIVER_NO_EXTENSIONS is set so we don't build the Cassandra C-extensions, # as this can take several minutes if ! ${CI}; then + full_image_name="${REGISTRY}/${IMAGE_NAME}:${1}" DOCKER_BUILDKIT=1 docker build \ --progress=plain \ - --cache-from="elasticobservability/apm-agent-python-testing:${1}" \ + --cache-from="${full_image_name}" \ --build-arg PYTHON_IMAGE="${1/-/:}" \ - --tag "elasticobservability/apm-agent-python-testing:${1}" \ + --tag "${full_image_name}" \ . 
fi -PYTHON_VERSION=${1} docker-compose run \ +PYTHON_VERSION=${1} docker compose run --quiet-pull \ -e PYTHON_FULL_VERSION=${1} \ -e LOCAL_USER_ID=$LOCAL_USER_ID \ -e LOCAL_GROUP_ID=$LOCAL_GROUP_ID \ @@ -67,6 +70,8 @@ PYTHON_VERSION=${1} docker-compose run \ -e WITH_COVERAGE=true \ -e CASS_DRIVER_NO_EXTENSIONS=1 \ -e PYTEST_JUNIT="--junitxml=/app/tests/docker-${1}-${2}-python-agent-junit.xml" \ + -e REGISTRY=${REGISTRY} \ + -e IMAGE_NAME=${IMAGE_NAME} \ -v ${pip_cache}:$(dirname ${docker_pip_cache}) \ -v "$(dirname $(pwd))":/app \ --rm run_tests \ diff --git a/tests/scripts/envs/elasticsearch-2.sh b/tests/scripts/envs/elasticsearch-2.sh deleted file mode 100644 index d9d68f99f..000000000 --- a/tests/scripts/envs/elasticsearch-2.sh +++ /dev/null @@ -1,5 +0,0 @@ -export PYTEST_MARKER="-m elasticsearch" -export ES_URL="http://elasticsearch2:9200" -export DOCKER_DEPS="elasticsearch2" -export WAIT_FOR_HOST="elasticsearch2" -export WAIT_FOR_PORT=9200 diff --git a/tests/scripts/envs/elasticsearch-5.sh b/tests/scripts/envs/elasticsearch-5.sh deleted file mode 100644 index 0ea3d5279..000000000 --- a/tests/scripts/envs/elasticsearch-5.sh +++ /dev/null @@ -1,5 +0,0 @@ -export PYTEST_MARKER="-m elasticsearch" -export ES_URL="http://elasticsearch5:9200" -export DOCKER_DEPS="elasticsearch5" -export WAIT_FOR_HOST="elasticsearch5" -export WAIT_FOR_PORT=9200 diff --git a/tests/scripts/envs/elasticsearch-6.sh b/tests/scripts/envs/elasticsearch-6.sh deleted file mode 100644 index fb4aa19b0..000000000 --- a/tests/scripts/envs/elasticsearch-6.sh +++ /dev/null @@ -1,5 +0,0 @@ -export PYTEST_MARKER="-m elasticsearch" -export ES_URL="http://elasticsearch6:9200" -export DOCKER_DEPS="elasticsearch6" -export WAIT_FOR_HOST="elasticsearch6" -export WAIT_FOR_PORT=9200 diff --git a/tests/scripts/envs/elasticsearch-9.sh b/tests/scripts/envs/elasticsearch-9.sh new file mode 100644 index 000000000..dc64a205b --- /dev/null +++ b/tests/scripts/envs/elasticsearch-9.sh @@ -0,0 +1,5 @@ +export 
PYTEST_MARKER="-m elasticsearch" +export ES_URL="http://elasticsearch9:9200" +export DOCKER_DEPS="elasticsearch9" +export WAIT_FOR_HOST="elasticsearch9" +export WAIT_FOR_PORT=9200 diff --git a/tests/scripts/envs/pymongo-3.6.sh b/tests/scripts/envs/pymongo-3.6.sh new file mode 100644 index 000000000..4454a674f --- /dev/null +++ b/tests/scripts/envs/pymongo-3.6.sh @@ -0,0 +1,3 @@ +export PYTEST_MARKER="-m mongodb" +export DOCKER_DEPS="mongodb36" +export MONGODB_HOST="mongodb36" diff --git a/tests/scripts/envs/pymongo-newest.sh b/tests/scripts/envs/pymongo-newest.sh index 4454a674f..8b496971b 100644 --- a/tests/scripts/envs/pymongo-newest.sh +++ b/tests/scripts/envs/pymongo-newest.sh @@ -1,3 +1,3 @@ export PYTEST_MARKER="-m mongodb" -export DOCKER_DEPS="mongodb36" -export MONGODB_HOST="mongodb36" +export DOCKER_DEPS="mongodb42" +export MONGODB_HOST="mongodb42" diff --git a/tests/scripts/license_headers_check.sh b/tests/scripts/license_headers_check.sh index dc239df96..9ba9655e0 100755 --- a/tests/scripts/license_headers_check.sh +++ b/tests/scripts/license_headers_check.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash if [[ $# -eq 0 ]] then - FILES=$(find . -iname "*.py" -not -path "./elasticapm/utils/wrapt/*" -not -path "./dist/*" -not -path "./build/*" -not -path "./tests/utils/stacks/linenos.py") + FILES=$(find . 
-iname "*.py" -not -path "./dist/*" -not -path "./build/*" -not -path "./tests/utils/stacks/linenos.py") else FILES=$@ fi diff --git a/tests/scripts/run_tests.sh b/tests/scripts/run_tests.sh index fc248949f..7fcc85010 100755 --- a/tests/scripts/run_tests.sh +++ b/tests/scripts/run_tests.sh @@ -3,7 +3,7 @@ set -e export PATH=${HOME}/.local/bin:${PATH} -python -m pip install --user -U pip --cache-dir "${PIP_CACHE}" +python -m pip install --user -U pip setuptools --cache-dir "${PIP_CACHE}" python -m pip install --user -r "tests/requirements/reqs-${FRAMEWORK}.txt" --cache-dir "${PIP_CACHE}" export PYTHON_VERSION=$(python -c "import platform; pv=platform.python_version_tuple(); print('pypy' + ('' if pv[0] == 2 else str(pv[0])) if platform.python_implementation() == 'PyPy' else '.'.join(map(str, platform.python_version_tuple()[:2])))") diff --git a/tests/transports/test_base.py b/tests/transports/test_base.py index 457f68613..2f77c3e95 100644 --- a/tests/transports/test_base.py +++ b/tests/transports/test_base.py @@ -107,18 +107,25 @@ def test_empty_queue_flush(mock_send, elasticapm_client): transport.close() -@mock.patch("elasticapm.transport.base.Transport.send") +@mock.patch("elasticapm.transport.base.Transport._flush") @pytest.mark.parametrize("elasticapm_client", [{"api_request_time": "5s"}], indirect=True) -def test_metadata_prepended(mock_send, elasticapm_client): +def test_metadata_prepended(mock_flush, elasticapm_client): transport = Transport(client=elasticapm_client, compress_level=0) transport.start_thread() transport.queue("error", {}, flush=True) transport.close() - assert mock_send.call_count == 1 - args, kwargs = mock_send.call_args - data = gzip.decompress(args[0]) + assert mock_flush.call_count == 1 + args, kwargs = mock_flush.call_args + buffer = args[0] + # this test used to mock send but after we fixed a leak for not releasing the memoryview containing + # the gzipped data we cannot read it anymore. 
So reimplement _flush and read the data ourselves + fileobj = buffer.fileobj + buffer.close() + compressed_data = fileobj.getbuffer() + data = gzip.decompress(compressed_data) data = data.decode("utf-8").split("\n") assert "metadata" in data[0] + compressed_data.release() @mock.patch("elasticapm.transport.base.Transport.send") @@ -157,24 +164,26 @@ def test_api_request_time_dynamic(mock_send, caplog, elasticapm_client): assert mock_send.call_count == 0 -@pytest.mark.skipif(sys.version_info >= (3, 12), reason="Failing locally on 3.12.0rc1") # TODO py3.12 +def _cleanup_flush_mock_buffers(mock_flush): + args, kwargs = mock_flush.call_args + buffer = args[0] + buffer.close() + + @mock.patch("elasticapm.transport.base.Transport._flush") def test_api_request_size_dynamic(mock_flush, caplog, elasticapm_client): - elasticapm_client.config.update(version="1", api_request_size="100b") + elasticapm_client.config.update(version="1", api_request_size="9b") transport = Transport(client=elasticapm_client, queue_chill_count=1) transport.start_thread() try: with caplog.at_level("DEBUG", "elasticapm.transport"): - # we need to add lots of uncompressible data to fill up the gzip-internal buffer - for i in range(12): - transport.queue("error", "".join(random.choice(string.ascii_letters) for i in range(2000))) + transport.queue("error", "".join(random.choice(string.ascii_letters) for i in range(2000))) transport._flushed.wait(timeout=0.1) + _cleanup_flush_mock_buffers(mock_flush) assert mock_flush.call_count == 1 elasticapm_client.config.update(version="1", api_request_size="1mb") with caplog.at_level("DEBUG", "elasticapm.transport"): - # we need to add lots of uncompressible data to fill up the gzip-internal buffer - for i in range(12): - transport.queue("error", "".join(random.choice(string.ascii_letters) for i in range(2000))) + transport.queue("error", "".join(random.choice(string.ascii_letters) for i in range(2000))) transport._flushed.wait(timeout=0.1) # Should be unchanged 
because our buffer limit is much higher. assert mock_flush.call_count == 1 @@ -182,18 +191,16 @@ def test_api_request_size_dynamic(mock_flush, caplog, elasticapm_client): transport.close() -@pytest.mark.skipif(sys.version_info >= (3, 12), reason="Failing locally on 3.12.0rc1") # TODO py3.12 @mock.patch("elasticapm.transport.base.Transport._flush") -@pytest.mark.parametrize("elasticapm_client", [{"api_request_size": "100b"}], indirect=True) +@pytest.mark.parametrize("elasticapm_client", [{"api_request_size": "9b"}], indirect=True) def test_flush_time_size(mock_flush, caplog, elasticapm_client): transport = Transport(client=elasticapm_client, queue_chill_count=1) transport.start_thread() try: with caplog.at_level("DEBUG", "elasticapm.transport"): - # we need to add lots of uncompressible data to fill up the gzip-internal buffer - for i in range(12): - transport.queue("error", "".join(random.choice(string.ascii_letters) for i in range(2000))) + transport.queue("error", "".join(random.choice(string.ascii_letters) for i in range(2000))) transport._flushed.wait(timeout=0.1) + _cleanup_flush_mock_buffers(mock_flush) assert mock_flush.call_count == 1 finally: transport.close() diff --git a/tests/transports/test_urllib3.py b/tests/transports/test_urllib3.py index 42a21c1e9..78bc26200 100644 --- a/tests/transports/test_urllib3.py +++ b/tests/transports/test_urllib3.py @@ -30,6 +30,7 @@ import os +import time import certifi import mock @@ -115,38 +116,46 @@ def test_generic_error(mock_urlopen, elasticapm_client): def test_http_proxy_environment_variable(elasticapm_client): - with mock.patch.dict("os.environ", {"HTTP_PROXY": "http://example.com"}): + with mock.patch.dict("os.environ", {"HTTP_PROXY": "http://example.com"}, clear=True): transport = Transport("http://localhost:9999", client=elasticapm_client) assert isinstance(transport.http, urllib3.ProxyManager) def test_https_proxy_environment_variable(elasticapm_client): - with mock.patch.dict("os.environ", {"HTTPS_PROXY": 
"https://example.com"}): + with mock.patch.dict( + "os.environ", + { + "HTTPS_PROXY": "https://example.com", + }, + clear=True, + ): transport = Transport("http://localhost:9999", client=elasticapm_client) assert isinstance(transport.http, urllib3.poolmanager.ProxyManager) def test_https_proxy_environment_variable_is_preferred(elasticapm_client): - with mock.patch.dict("os.environ", {"https_proxy": "https://example.com", "HTTP_PROXY": "http://example.com"}): + with mock.patch.dict( + "os.environ", {"https_proxy": "https://example.com", "HTTP_PROXY": "http://example.com"}, clear=True + ): transport = Transport("http://localhost:9999", client=elasticapm_client) assert isinstance(transport.http, urllib3.poolmanager.ProxyManager) assert transport.http.proxy.scheme == "https" def test_no_proxy_star(elasticapm_client): - with mock.patch.dict("os.environ", {"HTTPS_PROXY": "https://example.com", "NO_PROXY": "*"}): + with mock.patch.dict("os.environ", {"HTTPS_PROXY": "https://example.com", "NO_PROXY": "*"}, clear=True): transport = Transport("http://localhost:9999", client=elasticapm_client) assert not isinstance(transport.http, urllib3.poolmanager.ProxyManager) def test_no_proxy_host(elasticapm_client): - with mock.patch.dict("os.environ", {"HTTPS_PROXY": "https://example.com", "NO_PROXY": "localhost"}): + with mock.patch.dict("os.environ", {"HTTPS_PROXY": "https://example.com", "NO_PROXY": "localhost"}, clear=True): transport = Transport("http://localhost:9999", client=elasticapm_client) assert not isinstance(transport.http, urllib3.poolmanager.ProxyManager) def test_no_proxy_all(elasticapm_client): - with mock.patch.dict("os.environ", {"HTTPS_PROXY": "https://example.com", "NO_PROXY": "*"}): + with mock.patch.dict("os.environ", {"HTTPS_PROXY": "https://example.com", "NO_PROXY": "*"}, clear=True): transport = Transport("http://localhost:9999", client=elasticapm_client) assert not isinstance(transport.http, urllib3.poolmanager.ProxyManager) @@ -509,3 +518,107 @@ def 
test_fetch_server_info_flat_string(waiting_httpserver, caplog, elasticapm_cl transport.fetch_server_info() assert elasticapm_client.server_version is None assert_any_record_contains(caplog.records, "No version key found in server response") + + +def test_skip_server_info(waiting_httpserver, elasticapm_client): + elasticapm_client.config.update(version="1", skip_server_info=True) + waiting_httpserver.serve_content(code=202, content="", headers={"Location": "http://example.com/foo"}) + transport = Transport( + waiting_httpserver.url, client=elasticapm_client, headers=elasticapm_client._transport._headers + ) + transport.start_thread() + try: + url = transport.send("x".encode("latin-1")) + assert url == "http://example.com/foo" + finally: + transport.close() + + assert elasticapm_client.server_version is None + assert elasticapm_client.check_server_version(gte=(8, 7, 1)) + + +def test_close(waiting_httpserver, elasticapm_client): + elasticapm_client.server_version = (8, 0, 0) # avoid making server_info request + waiting_httpserver.serve_content(code=202, content="", headers={"Location": "http://example.com/foo"}) + transport = Transport( + waiting_httpserver.url, client=elasticapm_client, headers=elasticapm_client._transport._headers + ) + transport.start_thread() + + transport.close() + + assert transport._closed is True + assert transport._flushed.is_set() is True + + +def test_close_does_nothing_if_called_from_another_pid(waiting_httpserver, caplog, elasticapm_client): + elasticapm_client.server_version = (8, 0, 0) # avoid making server_info request + waiting_httpserver.serve_content(code=202, content="", headers={"Location": "http://example.com/foo"}) + transport = Transport( + waiting_httpserver.url, client=elasticapm_client, headers=elasticapm_client._transport._headers + ) + transport.start_thread() + + with mock.patch("os.getpid") as getpid_mock: + getpid_mock.return_value = 0 + transport.close() + + assert transport._closed is False + + transport.close() + + 
+def test_close_can_be_called_multiple_times(waiting_httpserver, caplog, elasticapm_client): + elasticapm_client.server_version = (8, 0, 0) # avoid making server_info request + waiting_httpserver.serve_content(code=202, content="", headers={"Location": "http://example.com/foo"}) + transport = Transport( + waiting_httpserver.url, client=elasticapm_client, headers=elasticapm_client._transport._headers + ) + transport.start_thread() + + with caplog.at_level("INFO", logger="elasticapm.transport.http"): + transport.close() + + assert transport._closed is True + + transport.close() + + +def test_close_timeout_error_without_flushing(waiting_httpserver, caplog, elasticapm_client): + elasticapm_client.server_version = (8, 0, 0) # avoid making server_info request + waiting_httpserver.serve_content(code=202, content="", headers={"Location": "http://example.com/foo"}) + + with caplog.at_level("INFO", logger="elasticapm.transport.http"): + with mock.patch.object(Transport, "_max_flush_time_seconds", 0): + with mock.patch.object(Transport, "_flush") as flush_mock: + # sleep more that the timeout + flush_mock.side_effect = lambda x: time.sleep(0.1) + transport = Transport( + waiting_httpserver.url, client=elasticapm_client, headers=elasticapm_client._transport._headers + ) + transport.start_thread() + # need to write something to the buffer to have _flush() called + transport.queue("error", {"an": "error"}) + transport.close() + + assert transport._flushed.is_set() is False + assert transport._closed is True + record = caplog.records[-1] + assert "Closing the transport connection timed out." 
in record.msg + + +def test_http_pool_manager_is_recycled_at_stop_thread(waiting_httpserver, caplog, elasticapm_client): + elasticapm_client.server_version = (8, 0, 0) # avoid making server_info request + waiting_httpserver.serve_content(code=202, content="", headers={"Location": "http://example.com/foo"}) + transport = Transport( + waiting_httpserver.url, client=elasticapm_client, headers=elasticapm_client._transport._headers + ) + transport.start_thread() + pool_manager = transport.http + + with caplog.at_level("INFO", logger="elasticapm.transport.http"): + transport.stop_thread() + + assert transport._flushed.is_set() is True + assert pool_manager != transport._http + assert not caplog.records diff --git a/tests/upstream/json-specs/container_metadata_discovery.json b/tests/upstream/json-specs/container_metadata_discovery.json index 510fd19d2..d1797ca0e 100644 --- a/tests/upstream/json-specs/container_metadata_discovery.json +++ b/tests/upstream/json-specs/container_metadata_discovery.json @@ -60,6 +60,41 @@ }, "containerId": "6548c6863fb748e72d1e2a4f824fde92f720952d062dede1318c2d6219a672d6", "podId": null + }, + "gardener": { + "files": { + "/proc/self/mountinfo": [ + "10112 5519 0:864 / / ro,relatime master:1972 - overlay overlay rw,lowerdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/35235/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/27346/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/27345/fs:/var/lib/containerd/io.containerd.snapsh", + "10113 10112 0:884 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw", + "10301 10112 0:926 / /dev rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755,inode64", + "10302 10301 0:930 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=666", + "10519 10301 0:820 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw", + "10520 10112 0:839 / /sys ro,nosuid,nodev,noexec,relatime - sysfs sysfs ro", + "10716 10520 
0:26 /kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod121157b5_c67d_4c3e_9052_cb27bbb711fb.slice/cri-containerd-1cd3449e930b8a28c7595240fa32ba20c84f36d059e5fbe63104ad40057992d1.scope /sys/fs/cgroup ro,nosuid,nodev,noexec,relatime - cgroup2 cgroup rw", + "10736 10112 8:3 /var/lib/kubelet/pods/121157b5-c67d-4c3e-9052-cb27bbb711fb/volumes/kubernetes.io~empty-dir/tmpdir /tmp rw,relatime - ext4 /dev/sda3 rw,discard,prjquota,errors=remount-ro", + "10737 10112 0:786 / /vault/tls ro,relatime - tmpfs tmpfs rw,size=4194304k,inode64", + "10738 10112 8:3 /var/lib/kubelet/pods/121157b5-c67d-4c3e-9052-cb27bbb711fb/etc-hosts /etc/hosts rw,relatime - ext4 /dev/sda3 rw,discard,prjquota,errors=remount-ro", + "10739 10301 8:3 /var/lib/kubelet/pods/121157b5-c67d-4c3e-9052-cb27bbb711fb/containers/application-search-indexer/9bf2b38c /dev/termination-log rw,relatime - ext4 /dev/sda3 rw,discard,prjquota,errors=remount-ro", + "10740 10112 8:3 /var/lib/containerd/io.containerd.grpc.v1.cri/sandboxes/26a006f558da58874bc37863efe9d2b5d715afc54453d95b22a7809a4e65566c/hostname /etc/hostname ro,relatime - ext4 /dev/sda3 rw,discard,prjquota,errors=remount-ro", + "10741 10112 8:3 /var/lib/containerd/io.containerd.grpc.v1.cri/sandboxes/26a006f558da58874bc37863efe9d2b5d715afc54453d95b22a7809a4e65566c/resolv.conf /etc/resolv.conf ro,relatime - ext4 /dev/sda3 rw,discard,prjquota,errors=remount-ro", + "10761 10301 0:788 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k,inode64", + "10762 10112 0:787 / /var/run/secrets/kubernetes.io/serviceaccount ro,relatime - tmpfs tmpfs rw,size=4194304k,inode64", + "5630 10113 0:884 /bus /proc/bus ro,nosuid,nodev,noexec,relatime - proc proc rw", + "5631 10113 0:884 /fs /proc/fs ro,nosuid,nodev,noexec,relatime - proc proc rw", + "5632 10113 0:884 /irq /proc/irq ro,nosuid,nodev,noexec,relatime - proc proc rw", + "5633 10113 0:884 /sys /proc/sys ro,nosuid,nodev,noexec,relatime - proc proc rw", + "5634 10113 0:931 / /proc/acpi ro,relatime 
- tmpfs tmpfs ro,inode64", + "5635 10113 0:926 /null /proc/kcore rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755,inode64", + "5636 10113 0:926 /null /proc/keys rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755,inode64", + "5637 10113 0:926 /null /proc/timer_list rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755,inode64", + "5639 10520 0:932 / /sys/firmware ro,relatime - tmpfs tmpfs ro,inode64" + ], + "/proc/self/cgroup": [ + "0::/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod121157b5_c67d_4c3e_9052_cb27bbb711fb.slice/cri-containerd-1cd3449e930b8a28c7595240fa32ba20c84f36d059e5fbe63104ad40057992d1.scope" + ] + }, + "containerId": "1cd3449e930b8a28c7595240fa32ba20c84f36d059e5fbe63104ad40057992d1", + "podId": "121157b5-c67d-4c3e-9052-cb27bbb711fb" } } diff --git a/tests/upstream/json-specs/metadata.json b/tests/upstream/json-specs/metadata.json index 7103bbeb5..1122ed68c 100644 --- a/tests/upstream/json-specs/metadata.json +++ b/tests/upstream/json-specs/metadata.json @@ -441,6 +441,14 @@ ], "maxLength": 1024 }, + "host_id": { + "description": "The OpenTelemetry semantic conventions compliant \"host.id\" attribute, if available.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, "hostname": { "description": "Deprecated: Use ConfiguredHostname and DetectedHostname instead. DeprecatedHostname is the host name of the system the service is running on. 
It does not distinguish between configured and detected hostname and therefore is deprecated and only used if no other hostname information is available.", "type": [ diff --git a/tests/upstream/json-specs/span.json b/tests/upstream/json-specs/span.json index e86da9a69..14eea1b15 100644 --- a/tests/upstream/json-specs/span.json +++ b/tests/upstream/json-specs/span.json @@ -188,6 +188,9 @@ "object" ], "properties": { + "body": { + "description": "The http request body usually as a string, but may be a dictionary for multipart/form-data content" + }, "id": { "description": "ID holds the unique identifier for the http request.", "type": [ diff --git a/tests/utils/json_utils/tests.py b/tests/utils/json_utils/tests.py index 7cbef4b36..28791e79d 100644 --- a/tests/utils/json_utils/tests.py +++ b/tests/utils/json_utils/tests.py @@ -36,6 +36,8 @@ import decimal import uuid +import pytest + from elasticapm.utils import json_encoder as json @@ -69,6 +71,11 @@ def test_decimal(): assert json.dumps(res) == "1.0" +@pytest.mark.parametrize("res", [float("nan"), float("+inf"), float("-inf")]) +def test_float_invalid_json(res): + assert json.dumps(res) != "null" + + def test_unsupported(): res = object() assert json.dumps(res).startswith('"